path | concatenated_notebook
---|---
doc/zh-tw.ipynb | ###Markdown
Syntactic analysis with [deplacy](https://koichiyasuoka.github.io/deplacy/) Using [Trankit](https://github.com/nlp-uoregon/trankit)
###Code
!pip install deplacy trankit transformers
import trankit
nlp=trankit.Pipeline("traditional-chinese")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [Stanza](https://stanfordnlp.github.io/stanza)
###Code
!pip install deplacy stanza
import stanza
stanza.download("zh-hant")
nlp=stanza.Pipeline("zh-hant")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [UDPipe 2](http://ufal.mff.cuni.cz/udpipe/2)
###Code
!pip install deplacy
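# query the UDPipe 2 REST web service; the JSON reply's "result" field holds CoNLL-U output that deplacy can render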
def nlp(t):
import urllib.request,urllib.parse,json
with urllib.request.urlopen("https://lindat.mff.cuni.cz/services/udpipe/api/process?model=zh_gsd&tokenizer&tagger&parser&data="+urllib.parse.quote(t)) as r:
return json.loads(r.read())["result"]
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [esupar](https://github.com/KoichiYasuoka/esupar)
###Code
!pip install deplacy esupar
import esupar
nlp=esupar.load("zh")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [NLP-Cube](https://github.com/Adobe/NLP-Cube)
###Code
!pip install deplacy nlpcube
from cube.api import Cube
nlp=Cube()
nlp.load("zh")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [spacy-udpipe](https://github.com/TakeLab/spacy-udpipe)
###Code
!pip install deplacy spacy-udpipe
import spacy_udpipe
spacy_udpipe.download("zh")
nlp=spacy_udpipe.load("zh")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [UD-Chinese](https://pypi.org/project/udchinese)
###Code
!pip install deplacy udchinese
import udchinese
nlp=udchinese.load()
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [spaCy](https://spacy.io/)
###Code
!pip install deplacy
!sudo pip install -U spacy
!sudo python -m spacy download zh_core_web_trf
import pkg_resources,imp
imp.reload(pkg_resources)
import spacy
nlp=spacy.load("zh_core_web_trf")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [DDParser](https://github.com/baidu/DDParser)
###Code
!pip install deplacy ddparser
from ddparser import DDParser
ddp=DDParser(use_pos=True)
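# build CoNLL-U-style rows (ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC) from DDParser's output so deplacy can render it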
nlp=lambda t:"".join(["\n".join(["\t".join([str(i+1),w,w,p,p,"_",str(h),d,"_","SpaceAfter=No"]) for i,(w,p,h,d) in enumerate(zip(s["word"],s["postag"],s["head"],s["deprel"]))])+"\n\n" for s in ddp.parse(t)])
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Syntactic analysis with [deplacy](https://koichiyasuoka.github.io/deplacy/) Using [Trankit](https://github.com/nlp-uoregon/trankit)
###Code
!pip install deplacy trankit transformers
import trankit
nlp=trankit.Pipeline("traditional-chinese")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [Stanza](https://stanfordnlp.github.io/stanza)
###Code
!pip install deplacy stanza
import stanza
stanza.download("zh-hant")
nlp=stanza.Pipeline("zh-hant")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [UDPipe 2](http://ufal.mff.cuni.cz/udpipe/2)
###Code
!pip install deplacy
def nlp(t):
import urllib.request,urllib.parse,json
with urllib.request.urlopen("https://lindat.mff.cuni.cz/services/udpipe/api/process?model=zh_gsd&tokenizer&tagger&parser&data="+urllib.parse.quote(t)) as r:
return json.loads(r.read())["result"]
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [spacy-udpipe](https://github.com/TakeLab/spacy-udpipe)
###Code
!pip install deplacy spacy-udpipe
import spacy_udpipe
spacy_udpipe.download("zh")
nlp=spacy_udpipe.load("zh")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [UD-Chinese](https://pypi.org/project/udchinese)
###Code
!pip install deplacy udchinese
import udchinese
nlp=udchinese.load()
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [spaCy](https://spacy.io/)
###Code
!pip install deplacy
!sudo pip install -U spacy
!sudo python -m spacy download zh_core_web_trf
import pkg_resources,imp
imp.reload(pkg_resources)
import spacy
nlp=spacy.load("zh_core_web_trf")
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____
###Markdown
Using [DDParser](https://github.com/baidu/DDParser)
###Code
!pip install deplacy ddparser
from ddparser import DDParser
ddp=DDParser(use_pos=True)
nlp=lambda t:"".join(["\n".join(["\t".join([str(i+1),w,w,p,p,"_",str(h),d,"_","SpaceAfter=No"]) for i,(w,p,h,d) in enumerate(zip(s["word"],s["postag"],s["head"],s["deprel"]))])+"\n\n" for s in ddp.parse(t)])
doc=nlp("希望是附麗於存在的,有存在,便有希望,有希望,便是光明。")
import deplacy
deplacy.render(doc)
deplacy.serve(doc,port=None)
# import graphviz
# graphviz.Source(deplacy.dot(doc))
###Output
_____no_output_____ |
Course/07-Control-Flow-Statements.ipynb | ###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Control Flow *Control flow* is where the rubber really meets the road in programming.Without it, a program is simply a list of statements that are sequentially executed.With control flow, you can execute certain code blocks conditionally and/or repeatedly: these basic building blocks can be combined to create surprisingly sophisticated programs!Here we'll cover *conditional statements* (including "``if``", "``elif``", and "``else``"), *loop statements* (including "``for``" and "``while``" and the accompanying "``break``", "``continue``", and "``pass``"). Conditional Statements: ``if``-``elif``-``else``:Conditional statements, often referred to as *if-then* statements, allow the programmer to execute certain pieces of code depending on some Boolean condition.A basic example of a Python conditional statement is this:
###Code
x = -15
if x == 0:
print(x, "is zero")
elif x > 0:
print(x, "is positive")
elif x < 0:
print(x, "is negative")
else:
print(x, "is unlike anything I've ever seen...")
###Output
_____no_output_____
###Markdown
Note especially the use of colons (``:``) and whitespace to denote separate blocks of code. Python adopts the ``if`` and ``else`` often used in other languages; its more unique keyword is ``elif``, a contraction of "else if". In these conditional clauses, ``elif`` and ``else`` blocks are optional; additionally, you can optionally include as few or as many ``elif`` statements as you would like. Exercise1. Write code which checks whether a given number ```a``` is a multiple of ```b``` and write your result as a text message.2. A leap year is a year of 366 days in the Gregorian calendar. It is a year whose number is exactly divisible by 4, or, in case of the final year of a century, by 400. Determine whether a given year is a leap year or not. ``for`` loops Loops in Python are a way to repeatedly execute some code statement. So, for example, if we'd like to print each of the items in a list, we can use a ``for`` loop:
###Code
for N in [2, 3, 5, 7]:
print(N, end=' ') # print all on same line
###Output
_____no_output_____
###Markdown
Notice the simplicity of the ``for`` loop: we specify the variable we want to use, the sequence we want to loop over, and use the "``in``" operator to link them together in an intuitive and readable way.More precisely, the object to the right of the "``in``" can be any Python *iterator*.An iterator can be thought of as a generalized sequence, and we'll discuss them in [Iterators](10-Iterators.ipynb).For example, one of the most commonly-used iterators in Python is the ``range`` object, which generates a sequence of numbers:
###Code
for i in range(10):
print(i, end=' ')
###Output
_____no_output_____
###Markdown
Note that the range starts at zero by default, and that by convention the top of the range is not included in the output.Range objects can also have more complicated values:
###Code
# range from 5 to 10
list(range(5, 10))
# range from 0 to 10 by 2
list(range(0, 10, 2))
###Output
_____no_output_____
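###Markdown
As a quick aside (a small sketch), calling ``range`` without wrapping it in ``list`` returns a lazy range object rather than a list -- the Python 2 vs. Python 3 difference noted below:
###Code
r = range(5)
print(r)        # range(0, 5) -- a lazy iterable, not a list
print(list(r))  # [0, 1, 2, 3, 4]
###Output
_____no_output_____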
###Markdown
You might notice that the meaning of ``range`` arguments is very similar to the slicing syntax that we covered in [Lists](06-Built-in-Data-Structures.ipynb#Lists). Note that the behavior of ``range()`` is one of the differences between Python 2 and Python 3: in Python 2, ``range()`` produces a list, while in Python 3, ``range()`` produces an iterable object. ``while`` loops The other type of loop in Python is a ``while`` loop, which iterates until some condition is met:
###Code
i = 0
while i < 10:
print(i, end=' ')
i += 1
###Output
_____no_output_____
###Markdown
The argument of the ``while`` loop is evaluated as a boolean statement, and the loop is executed until the statement evaluates to False. ``break`` and ``continue``: Fine-Tuning Your LoopsThere are two useful statements that can be used within loops to fine-tune how they are executed:- The ``break`` statement breaks-out of the loop entirely- The ``continue`` statement skips the remainder of the current loop, and goes to the next iterationThese can be used in both ``for`` and ``while`` loops.Here is an example of using ``continue`` to print a string of odd numbers.In this case, the result could be accomplished just as well with an ``if-else`` statement, but sometimes the ``continue`` statement can be a more convenient way to express the idea you have in mind:
###Code
for n in range(20):
# if the remainder of n / 2 is 0, skip the rest of the loop
if n % 2 == 0:
continue
print(n, end=' ')
###Output
_____no_output_____
###Markdown
Here is an example of a ``break`` statement used for a less trivial task.This loop will fill a list with all Fibonacci numbers up to a certain value:
###Code
a, b = 0, 1
amax = 100
L = []
while True:
(a, b) = (b, a + b)
if a > amax:
break
L.append(a)
print(L)
###Output
_____no_output_____
###Markdown
Notice that we use a ``while True`` loop, which will loop forever unless we have a break statement! Loops with an ``else`` BlockOne rarely used pattern available in Python is the ``else`` statement as part of a ``for`` or ``while`` loop.We discussed the ``else`` block earlier: it executes if all the ``if`` and ``elif`` statements evaluate to ``False``.The loop-``else`` is perhaps one of the more confusingly-named statements in Python; I prefer to think of it as a ``nobreak`` statement: that is, the ``else`` block is executed only if the loop ends naturally, without encountering a ``break`` statement.As an example of where this might be useful, consider the following (non-optimized) implementation of the *Sieve of Eratosthenes*, a well-known algorithm for finding prime numbers:
###Code
L = []
nmax = 30
for n in range(2, nmax):
for factor in L:
if n % factor == 0:
break
else: # no break
L.append(n)
print(L)
###Output
_____no_output_____
###Markdown
The ``else`` statement only executes if none of the factors divide the given number.The ``else`` statement works similarly with the ``while`` loop.
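For example, a minimal sketch (values chosen arbitrarily) where the ``else`` block runs because the ``while`` loop finishes without hitting ``break``:
###Code
i = 3
while i > 0:
    print(i, end=' ')
    i -= 1
else:  # no break occurred, so this runs
    print('finished without break')
###Output
_____no_output_____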
###Code
import numpy as np
print('arr = [', end='')
for _ in range (30):
print('%d, '%np.random.choice(100), end='')
print(']')
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Control Flow *Control flow* is where the rubber really meets the road in programming.Without it, a program is simply a list of statements that are sequentially executed.With control flow, you can execute certain code blocks conditionally and/or repeatedly: these basic building blocks can be combined to create surprisingly sophisticated programs!Here we'll cover *conditional statements* (including "``if``", "``elif``", and "``else``"), *loop statements* (including "``for``" and "``while``" and the accompanying "``break``", "``continue``", and "``pass``"). Conditional Statements: ``if``-``elif``-``else``:Conditional statements, often referred to as *if-then* statements, allow the programmer to execute certain pieces of code depending on some Boolean condition.A basic example of a Python conditional statement is this:
###Code
x = -15
if x == 0:
print(x, "is zero")
elif x > 0:
print(x, "is positive")
elif x < 0:
print(x, "is negative")
else:
print(x, "is unlike anything I've ever seen...")
###Output
_____no_output_____
###Markdown
Note especially the use of colons (``:``) and whitespace to denote separate blocks of code. Python adopts the ``if`` and ``else`` often used in other languages; its more unique keyword is ``elif``, a contraction of "else if". In these conditional clauses, ``elif`` and ``else`` blocks are optional; additionally, you can optionally include as few or as many ``elif`` statements as you would like. Exercise1. Write code which checks whether a given number ```a``` is a multiple of ```b``` and write your result as a text message.2. A leap year is a year of 366 days in the Gregorian calendar. It is a year whose number is exactly divisible by 4, or, in case of the final year of a century, by 400. Determine whether a given year is a leap year or not. ``for`` loops Loops in Python are a way to repeatedly execute some code statement. So, for example, if we'd like to print each of the items in a list, we can use a ``for`` loop:
###Code
for N in [2, 3, 5, 7]:
print(N, end=' ') # print all on same line
###Output
_____no_output_____
###Markdown
Notice the simplicity of the ``for`` loop: we specify the variable we want to use, the sequence we want to loop over, and use the "``in``" operator to link them together in an intuitive and readable way.More precisely, the object to the right of the "``in``" can be any Python *iterator*.An iterator can be thought of as a generalized sequence, and we'll discuss them in [Iterators](10-Iterators.ipynb).For example, one of the most commonly-used iterators in Python is the ``range`` object, which generates a sequence of numbers:
###Code
for i in range(10):
print(i, end=' ')
###Output
_____no_output_____
###Markdown
Note that the range starts at zero by default, and that by convention the top of the range is not included in the output.Range objects can also have more complicated values:
###Code
# range from 5 to 10
list(range(5, 10))
# range from 0 to 10 by 2
list(range(0, 10, 2))
###Output
_____no_output_____
###Markdown
You might notice that the meaning of ``range`` arguments is very similar to the slicing syntax that we covered in [Lists](06-Built-in-Data-Structures.ipynb#Lists). Note that the behavior of ``range()`` is one of the differences between Python 2 and Python 3: in Python 2, ``range()`` produces a list, while in Python 3, ``range()`` produces an iterable object. ``while`` loops The other type of loop in Python is a ``while`` loop, which iterates until some condition is met:
###Code
i = 0
while i < 10:
print(i, end=' ')
i += 1
###Output
_____no_output_____
###Markdown
The argument of the ``while`` loop is evaluated as a boolean statement, and the loop is executed until the statement evaluates to False. ``break`` and ``continue``: Fine-Tuning Your LoopsThere are two useful statements that can be used within loops to fine-tune how they are executed:- The ``break`` statement breaks-out of the loop entirely- The ``continue`` statement skips the remainder of the current loop, and goes to the next iterationThese can be used in both ``for`` and ``while`` loops.Here is an example of using ``continue`` to print a string of odd numbers.In this case, the result could be accomplished just as well with an ``if-else`` statement, but sometimes the ``continue`` statement can be a more convenient way to express the idea you have in mind:
###Code
for n in range(20):
# if the remainder of n / 2 is 0, skip the rest of the loop
if n % 2 == 0:
continue
print(n, end=' ')
###Output
_____no_output_____
###Markdown
Here is an example of a ``break`` statement used for a less trivial task.This loop will fill a list with all Fibonacci numbers up to a certain value:
###Code
a, b = 0, 1
amax = 100
L = []
while True:
(a, b) = (b, a + b)
if a > amax:
break
L.append(a)
print(L)
###Output
_____no_output_____
###Markdown
Notice that we use a ``while True`` loop, which will loop forever unless we have a break statement! Loops with an ``else`` BlockOne rarely used pattern available in Python is the ``else`` statement as part of a ``for`` or ``while`` loop.We discussed the ``else`` block earlier: it executes if all the ``if`` and ``elif`` statements evaluate to ``False``.The loop-``else`` is perhaps one of the more confusingly-named statements in Python; I prefer to think of it as a ``nobreak`` statement: that is, the ``else`` block is executed only if the loop ends naturally, without encountering a ``break`` statement.As an example of where this might be useful, consider the following (non-optimized) implementation of the *Sieve of Eratosthenes*, a well-known algorithm for finding prime numbers:
###Code
L = []
nmax = 30
for n in range(2, nmax):
for factor in L:
if n % factor == 0:
break
else: # no break
L.append(n)
print(L)
###Output
_____no_output_____ |
notebooks/rtsp_camera/.ipynb_checkpoints/rtsp_camera-checkpoint.ipynb | ###Markdown
Hello Camera Generic (IP) Cameras In this notebook, you can test your camera to make sure it's working on the Jetson Nano as expected. It should already be plugged into the USB camera port. Make sure there is no obstruction on the camera lens such as a film or cover. Tip To execute the Python or system code in the code cells, select the cell and click the "Run" button at the top of the window. Keyboard shortcut: [SHIFT][ENTER] Check to see if the device is available Execute the following system command to list all video devices on the Jetson Nano. If your camera doesn't show up with a device id, check your connection. You should get an output similar to ```crw-rw----+ 1 root video 81, 0 Jun 2 17:35 /dev/video0```
###Code
!ls -ltrh /dev/video*
from jetcam.rtsp_camera import RTSPCamera
###Output
_____no_output_____
###Markdown
Create the camera object First, create a camera object by importing the `RTSPCamera` class from the jetcam library and executing the following Python code cell. Please note, you can only create one camera instance. In this notebook, `capture_device=` is set to the camera's RTSP stream URL; for a USB camera you would instead import `USBCamera` and set `capture_device=` to the device number found when you listed the system video devices (e.g. `capture_device=0` for `/dev/video0`, `capture_device=1` for `/dev/video1`).
###Code
#from jetcam.usb_camera import USBCamera
#TODO change capture_device if incorrect for your system
#camera = RTSPCamera(width=224, height=224, capture_width=640, capture_height=480, capture_device='rtsp://admin:[email protected]:554/cam/realmonitor?channel=1&subtype=1')
#camera = RTSPCamera(width=224, height=224, capture_width=640, capture_height=480, capture_device=1)
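# RTSP URL format: rtsp://<user>:<password>@<host>:<port>/<stream path> -- the credentials and IP below are just this notebook's own example values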
camera = RTSPCamera(width=224, height=224, capture_width=640, capture_height=480, capture_device='rtsp://admin:[email protected]:554/cam/realmonitor?channel=1&subtype=1')
###Output
_____no_output_____
###Markdown
We can then capture a frame from the camera with the `read` method.
###Code
image = camera.read()
print(image.shape)
###Output
(224, 224, 3)
###Markdown
Calling the `read` method for `camera` also updates the camera's internal `value`. By looking at the value's `shape`, we see three numbers representing the pixel height, pixel width, and number of color channels.
###Code
print(camera.value.shape)
###Output
(224, 224, 3)
###Markdown
Create a widget to view the image stream We can create a "widget" to display this image in the notebook. In order to see the image, convert it from its blue-green-red format (bgr8) to a format the browser can display (jpeg).
###Code
import ipywidgets
from IPython.display import display
from jetcam.utils import bgr8_to_jpeg
image_widget = ipywidgets.Image(format='jpeg')
image_widget.value = bgr8_to_jpeg(image)
display(image_widget)
###Output
_____no_output_____
###Markdown
You should see an image from the camera if all is working correctly. If there seems to be an image but it's fuzzy or a funny color, check to make sure there is no protective film or cap on the lens. Now let's watch a live stream from the camera. Set the `running` value of the camera to continuously update the value in the background. This allows you to attach "callbacks" to the camera value changes. The "callback" here is the function, `update_image`, which is attached by calling the `observe` method below. `update_image` is executed whenever there is a new image available to process, which is then displayed in the widget.
###Code
camera.running = True
def update_image(change):
image = change['new']
image_widget.value = bgr8_to_jpeg(image)
camera.observe(update_image, names='value')
###Output
_____no_output_____
###Markdown
If you move something in front of the camera, you should now see the live video stream in the widget. To stop it, unattach the callback with the `unobserve` method.
###Code
camera.unobserve(update_image, names='value')
###Output
_____no_output_____
###Markdown
Tip You can move the widgets (or any cell) to new window tabs in JupyterLab by right-clicking the cell and selecting "Create New View for Output". This way, you can continue to scroll down the JupyterLab notebook and still see the camera view! Another way to view the image stream You can also use the traitlets `dlink` method to connect the camera to the widget, using a transform as one of the parameters. This eliminates some steps in the process.
###Code
import traitlets
camera_link = traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
###Output
_____no_output_____
###Markdown
You can remove the camera/widget link with the `unlink` method.
###Code
camera_link.unlink()
###Output
_____no_output_____
###Markdown
... and reconnect it again with `link`.
###Code
camera_link.link()
###Output
_____no_output_____ |
Data Processing/Question 1- 8/Spark Tutorial.ipynb | ###Markdown
Data Manipulation Section -> Dataframe -> Reading Dataset -> Checking Datatypes (Schema) -> Indexing and Pandas Data Manipulation -> Adding & Dropping Columns
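The cells below assume an existing SparkSession named ``spark`` and a dataframe ``df_pyspark``; a minimal setup sketch (the app name is arbitrary and the CSV path is taken from this notebook):
###Code
# minimal setup sketch -- assumes pyspark is installed and salary.csv is in the working directory
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('SparkTutorial').getOrCreate()
df_pyspark = spark.read.csv('salary.csv', header=True, inferSchema=True)
###Output
_____no_output_____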
###Code
df_pyspark.show(5)
sal=spark.read.option('header','true').csv('salary.csv')
#sal.select('salary')
sal.select('salary').show(5)
sal.dtypes
sal.describe().show()
#Adding a Column to the DataFrame
sal.withColumn('Premiere Salary',sal['salary']+20000).show(5)
#Dropping the Column
sal=sal.drop('Premiere Salary')
#Column Rename
sal.withColumnRenamed('yearsrank','Rank').show(5)
###Output
+------+-----------+----+-----------+------+--------+-----+
|salary|yearsworked|Rank| market|degree|position|Field|
+------+-----------+----+-----------+------+--------+-----+
| 53000| 0| 0|1.169999957| 1| 1| 3|
| 58000| 0| 0| 1.24000001| 1| 1| 2|
| 45500| 0| 0|1.210000038| 1| 1| 3|
| 35782| 2| 1| 0.99000001| 1| 1| 4|
| 34731| 2| 2|0.910000026| 1| 1| 4|
+------+-----------+----+-----------+------+--------+-----+
only showing top 5 rows
###Markdown
PySpark Handling Missing Values --> Dropping Columns --> Dropping Rows --> Various Parameters in Dropping Functionalities --> Handling Missing Values by Mean, Median and Mode
###Code
sal1=spark.read.csv('salary.csv',header=True,inferSchema=True)
sal1.show(5)
#Dropping the Columns
sal1.drop('yearsworked').show(5)
#Dropping data which don't have any data
sal1.na.drop().show()
#Threshold: keep rows with at least 3 non-null values
sal1.na.drop(how="any",thresh=3).show()
#Subset elimination of nulls (if you select "market", all the rows with null in the market field will be eliminated)
sal1.na.drop(how="any",subset=['yearsrank']).show()
#sal1.na.fill('missing Values').show()
#filling the Missing Value
#sal1.na.fill('Missing Values').show() In all columns
#sal1.na.fill('Missing Values',['yearsrank','salary']).show()
#Handling missing values with the pyspark.ml Imputer (a sketch: the output column name is an assumption)
#from pyspark.ml.feature import Imputer
#imputer=Imputer(inputCols=['yearsrank'],outputCols=['yearsrank_imputed']).setStrategy("mean")
#imputer.fit(sal1).transform(sal1).show(5)
###Output
_____no_output_____ |
GrupoBimboV1/Classifying Client Type using Client Names.ipynb | ###Markdown
The methods used to generate the filter terms involved looking at the most frequent words/client names for clues about what types of establishments were mentioned in this data set. Developing the filters took a fair amount of "human" sleuthing while looking at TF-IDF scores, frequency counts of Client names, as well as just general knowledge of common Spanish words used to refer to certain establishment types. Especially with all the noise provided by the Clients referred to only by a proper name, the filtered data is a proportionally small figure, but a significant one.
###Code
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
#print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# Load in the Client Name data
# Make sure all names uppercase (there are some mixed instances)
pd.set_option('display.max_rows', 50)
vf = pd.read_csv('../input/cliente_tabla.csv',header=0)
vf['NombreCliente'] = vf['NombreCliente'].str.upper()
# Begin with a scan of the Client Name Data based on Top Frequency Client Names
# Notice there are a lot of Proper Names
vf['NombreCliente'].value_counts()[0:200]
# Let's also generate a list of individual word frequency across all names
def tfidf_score_list(vf2, list_len):
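# all client names get joined into a single document below, so these TF-IDF scores effectively rank words by overall frequency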
from sklearn.feature_extraction.text import TfidfVectorizer
v = TfidfVectorizer()
vf2['New'] = 'na'
a = " ".join(vf2['NombreCliente'])
vf2['New'][0] = a
tfidf = v.fit_transform(vf2['New'])
feature_names = v.get_feature_names()
freq = []
doc = 0
feature_index = tfidf[doc,:].nonzero()[1]
tfidf_scores = zip(feature_index, [tfidf[doc, x] for x in feature_index])
for w, s in [(feature_names[i], s) for (i, s) in tfidf_scores]:
freq.append((w.encode('utf-8'),s))
del vf2['New']
import numpy as np
names = ['word','score']
formats = ['S50','f8']
dtype = dict(names = names, formats=formats)
array = np.array(freq, dtype=dtype)
b = np.sort(array, order='score')
if list_len > len(b)+1:
list_len = len(b)+1
for i in range(1,list_len):
print(b[-i])
tfidf_score_list(vf, 200)
###Output
/opt/conda/lib/python3.5/site-packages/ipykernel/__main__.py:8: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
###Markdown
Again, notice the prevalence of personal names. By looking at a long enough list, however, we can start to see some other useful terms appear such as particles of speech (i.e. 'ag', 'los', 'san') or more useful words like "super", "oxxo", "mini", "comodin". If I found a word that I thought represented a type of establishment, I'd double-check it by doing a single search. If that search was fruitful and had a good amount of relevant results, I'd add it to my filter. An example of a single term search is below:
###Code
print(vf[vf['NombreCliente'].str.contains('.*CAFE.*')])
###Output
Cliente_ID NombreCliente
78 1438 CAFETRIA PREPARATORIA
1095 5045 CAFETERIA DE LA SECUNDARIA 13
1098 5048 CAFETERIA PREPA 2
1233 5416 CAFETERIA
1318 5612 CAFETERIA NORMAL DE PROFESORES
1446 5908 CAFETERIA FACULTAD DE PSICOLOGIA
2005 7463 CAFETERIA DE LA UAEM
2150 7922 ICATI CAFETERIA
2306 8317 CAFETERIA FACULTAD DE DERECHO
2308 8319 CAFETERIA FACULTAD DE ADMINISTRACION
2573 8979 CAFETERIA
2864 9956 LA CAFE
3147 10753 CAFETERIA LA CARRETA
3207 10844 CAFETERIA EL JUBILEO
3209 10846 CAFETERIA LA CENTRAL
3240 10913 CAFETERIA DON ARTURO
3243 10918 CAFETERIA COLISEO
3701 12031 CAFE PARIS
4346 13751 CAFETERIA FACULTAD
4766 15397 RUSTICO CAFE
4827 15629 CAFE LA TORRE
5491 17024 CAFETERIA PREPA 7 VESPERTINO
5597 17252 CAFETERIA HOSPITAL TRINIDAD
5910 17912 CAFE EL NUEVO HUATUSCO
5926 17965 CAFE P Z
... ... ...
928449 8210873 AEROCAFE
929049 8234198 CAFE INTERNET PACIFICO
929110 8236711 CAFETERIA FRAGOS
929372 8250865 CIBER CAFE NINO
929511 8266059 CAFETERIA LORAY
929792 8276937 CAFETERIA ADYS
929895 8280710 RESTAURANTE CAFE LA FIESTA
930155 8293191 EL NUEVO WILLIS CAFE
930235 8295611 CIBER CAFE MUNDO VIRTUAL
931042 8390606 CAFETERIA ESTHER INT CONALEP
931043 8390606 CAFETERIA ESTHER INT CONALEP
931148 8397676 CAFE RUBENS
931826 8565291 CAFE SAN RAFAEL
932328 9484515 SDN CAFETERIA2 2DA ZONA MILITAR
932581 9506908 CAFE TINA
932801 9521368 CAFETERIA MIX
932977 9537141 CAFE LUNNAH
933415 9574912 CAFETERIA CLUB CHIVAS
933907 9622671 CAFE LA GLORIA
934245 9655254 CAFETERIA EXTENCION RIOS
934488 9678492 CAFETERIA LA CASA VIEJA
934720 9693686 SECUNDARIA 7 CAFETERIA 2
934786 9702948 NUEVO CAFE AZTECA
934942 9711388 CAFETERIA SALOM
935214 9746888 CAFETERIA LA VACA
[2508 rows x 2 columns]
###Markdown
The result is a filter derived from hand-picking the best, most common, most interesting terms I could determine.
###Code
# --- Begin Filtering for specific terms
# Note that the order of filtering is significant.
# For example:
# The regex of .*ERIA.* will assign "FRUITERIA" to 'Eatery' rather than 'Fresh Market'.
# In other words, the first filters to occur have a bigger priority.
def filter_specific(vf2):
# Known Large Company / Special Group Types
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*REMISION.*','Consignment')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*WAL MART.*','.*SAMS CLUB.*'],'Walmart', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*OXXO.*','Oxxo Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*CONASUPO.*','Govt Store')
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*BIMBO.*','Bimbo Store')
# General term search for a random assortment of words I picked from looking at
# their frequency of appearance in the data and common spanish words for these categories
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COLEG.*','.*UNIV.*','.*ESCU.*','.*INSTI.*',\
'.*PREPAR.*'],'School', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*PUESTO.*','Post')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*FARMA.*','.*HOSPITAL.*','.*CLINI.*'],'Hospital/Pharmacy', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*CAFE.*','.*CREMERIA.*','.*DULCERIA.*',\
'.*REST.*','.*BURGER.*','.*TACO.*', '.*TORTA.*',\
'.*TAQUER.*','.*HOT DOG.*',\
'.*COMEDOR.*', '.*ERIA.*','.*BURGU.*'],'Eatery', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].str.replace('.*SUPER.*','Supermarket')
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*COMERCIAL.*','.*BODEGA.*','.*DEPOSITO.*',\
'.*ABARROTES.*','.*MERCADO.*','.*CAMBIO.*',\
'.*MARKET.*','.*MART .*','.*MINI .*',\
'.*PLAZA.*','.*MISC.*','.*ELEVEN.*','.*EXP.*',\
'.*SNACK.*', '.*PAPELERIA.*', '.*CARNICERIA.*',\
'.*LOCAL.*','.*COMODIN.*','.*PROVIDENCIA.*'
],'General Market/Mart'\
, regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*VERDU.*','.*FRUT.*'],'Fresh Market', regex=True)
vf2['NombreCliente'] = vf2['NombreCliente'].replace(['.*HOTEL.*','.*MOTEL.*'],'Hotel', regex=True)
filter_specific(vf)
# --- Begin filtering for more general terms
# The idea here is to look for names with particles of speech that would
# not appear in a person's name.
# i.e. "Individuals" should not contain any participles or numbers in their names.
def filter_participle(vf2):
vf2['NombreCliente'] = vf2['NombreCliente'].replace([
'.*LA .*','.*EL .*','.*DE .*','.*LOS .*','.*DEL .*','.*Y .*', '.*SAN .*', '.*SANTA .*',\
'.*AG .*','.*LAS .*','.*MI .*','.*MA .*', '.*II.*', '.*[0-9]+.*'\
],'Small Franchise', regex=True)
filter_participle(vf)
# Any remaining entries should be "Individual" Named Clients, there are some outliers.
# More specific filters could be used in order to reduce the percentage of outliers in this final set.
def filter_remaining(vf2):
def function_word(data):
# Avoid the single-words created so far by checking for upper-case
if (data.isupper()) and (data != "NO IDENTIFICADO"):
return 'Individual'
else:
return data
vf2['NombreCliente'] = vf2['NombreCliente'].map(function_word)
filter_remaining(vf)
###Output
_____no_output_____
###Markdown
With the filtering complete, let's look at the breakdown of how the data is classified now:
###Code
vf['NombreCliente'].value_counts()
###Output
_____no_output_____
###Markdown
Finally, we can apply these new tags on the actual Training and Test data sets that have been provided!
###Code
#trdf = pd.read_csv('../input/train.csv',header=0)
#trdf_stores = trdf.merge(vf.drop_duplicates(subset=['Cliente_ID']), how="left")
#tsdf = pd.read_csv('../input/test.csv',header=0)
#tsdf_stores = tsdf.merge(vf.drop_duplicates(subset=['Cliente_ID']), how="left")
###Output
_____no_output_____
###Markdown
Write the data to file to save it for a new session....
###Code
#trdf_stores.to_csv('../output/train_with_cnames.csv')
#tsdf_stores.to_csv('../output/test_with_cnames.csv')
###Output
_____no_output_____ |
2018/helsinki/slides/Preprocessing_textual_data.ipynb | ###Markdown
Power of GREP See the manual [here](http://www.rstudio.com/wp-content/uploads/2016/09/RegExCheatsheet.pdf).
###Code
gsub("what to change", "what to replace", "what to change in this text")
gsub("\t", "", uglytext)
gsub("[^[:alpha:]]", " ", uglytext)
###Output
_____no_output_____
###Markdown
Working with a corpus and document-term-matrix
###Code
## Loading example text data
load(url("https://cbail.github.io/Trump_Tweets.Rdata"))
head(trumptweets)
library(tm)
trump_corpus <- Corpus(VectorSource( trumptweets$text) )
###Output
_____no_output_____
###Markdown
Other ways to read your corpus: ``Corpus( DirSource(directory = "directory") )`` or ``Corpus( VectorSource( dataframe$variable ) )``
###Code
library(tidytext)
library(dplyr)
tidy_trump_tweets<- trumptweets %>%
select(created_at,text) %>%
unnest_tokens("word", text)
head( tidy_trump_tweets )
tidy_trump_tweets %>%
count(word) %>%
arrange(desc(n))
data("stop_words")
tidy_trump_tweets<-tidy_trump_tweets %>%
anti_join(stop_words)
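# drop tokens that are just numbers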
tidy_trump_tweets<-tidy_trump_tweets[-grep("\\b\\d+\\b", tidy_trump_tweets$word),]
tidy_trump_tweets$word <- gsub("\\s+","",tidy_trump_tweets$word)
## remove some extra nonsense
# tidy_trump_tweets$word <- gsub("https","",tidy_trump_tweets$word)
# tidy_trump_tweets$word <- gsub("rt "," ",tidy_trump_tweets$word)
library(SnowballC)
tidy_trump_tweets<-tidy_trump_tweets %>%
mutate_at("word", funs(wordStem((.), language="en")))
tidy_trump_tweets %>%
count(word) %>%
arrange(desc(n))
tidy_trump_DTM <-
tidy_trump_tweets %>%
count(created_at, word) %>%
cast_dtm(created_at, word, n)
tidy_trump_DTM
tidy_trump_tfidf<- trumptweets %>%
select(created_at,text) %>%
unnest_tokens("word", text) %>%
anti_join(stop_words) %>%
count(word, created_at) %>%
bind_tf_idf(word, created_at, n)
top_tfidf<-tidy_trump_tfidf %>%
arrange(desc(tf_idf))
top_tfidf$word[1]
###Output
_____no_output_____ |
1. Introduction to TensorFlow/Udemy/00-Keras-Syntax-Basics.ipynb | ###Markdown
Keras Syntax Basics With TensorFlow 2.0, Keras is now the main API choice. Let's work through a simple regression project to understand the basics of the Keras syntax and adding layers. The Data To learn the basic syntax of Keras, we will use a very simple fake data set; in the subsequent lectures we will focus on real datasets, along with feature engineering! For now, let's focus on the syntax of TensorFlow 2.0. Let's pretend this data are measurements of some rare gem stones, with 2 measurement features and a sale price. Our final goal would be to try to predict the sale price of a new gem stone we just mined from the ground, in order to try to set a fair price in the market. Load the Data
###Code
import pandas as pd
df = pd.read_csv('../DATA/fake_reg.csv')
df.head()
###Output
_____no_output_____
###Markdown
Explore the data Let's take a quick look; we should see strong correlation between the features and the "price" of this made-up product.
###Code
import seaborn as sns
import matplotlib.pyplot as plt
sns.pairplot(df)
###Output
_____no_output_____
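###Markdown
To put a number on that visual impression, a quick correlation check (a sketch) of each feature against the price:
###Code
# correlation of every numeric column with the price
df.corr()['price'].sort_values(ascending=False)
###Output
_____no_output_____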
###Markdown
Feel free to visualize more, but this data is fake, so we will focus on feature engineering and exploratory data analysis later on in the course in much more detail! Test/Train Split
###Code
from sklearn.model_selection import train_test_split
# Convert Pandas to Numpy for Keras
# Features
X = df[['feature1','feature2']].values
# Label
y = df['price'].values
# Split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=42)
X_train.shape
X_test.shape
y_train.shape
y_test.shape
###Output
_____no_output_____
###Markdown
Normalizing/Scaling the Data We scale the feature data. [Why we don't need to scale the label](https://stats.stackexchange.com/questions/111467/is-it-necessary-to-scale-the-target-value-in-addition-to-scaling-features-for-re)
###Code
from sklearn.preprocessing import MinMaxScaler
help(MinMaxScaler)
scaler = MinMaxScaler()
# Notice to prevent data leakage from the test set, we only fit our scaler to the training set
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
###Output
_____no_output_____
###Markdown
TensorFlow 2.0 Syntax Import Options There are several ways you can import Keras from TensorFlow (this is largely a personal style choice; please use any import method you prefer). We will use the method shown in the **official TF documentation**.
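For instance, these equivalent styles (a sketch; either path reaches the same `Sequential` class):
###Code
# equivalent import styles in TF 2.0 -- pick one and stay consistent
import tensorflow as tf
from tensorflow import keras

model_a = tf.keras.models.Sequential()   # fully qualified through the tf namespace
model_b = keras.Sequential()             # through the keras submodule
###Output
_____no_output_____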
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential
help(Sequential)
###Output
Help on class Sequential in module tensorflow.python.keras.engine.sequential:
class Sequential(tensorflow.python.keras.engine.training.Model)
| Sequential(layers=None, name=None)
|
| Linear stack of layers.
|
| Arguments:
| layers: list of layers to add to the model.
|
| Example:
|
| ```python
| # Optionally, the first layer can receive an `input_shape` argument:
| model = Sequential()
| model.add(Dense(32, input_shape=(500,)))
| # Afterwards, we do automatic shape inference:
| model.add(Dense(32))
|
| # This is identical to the following:
| model = Sequential()
| model.add(Dense(32, input_dim=500))
|
| # And to the following:
| model = Sequential()
| model.add(Dense(32, batch_input_shape=(None, 500)))
|
| # Note that you can also omit the `input_shape` argument:
| # In that case the model gets built the first time you call `fit` (or other
| # training and evaluation methods).
| model = Sequential()
| model.add(Dense(32))
| model.add(Dense(32))
| model.compile(optimizer=optimizer, loss=loss)
| # This builds the model for the first time:
| model.fit(x, y, batch_size=32, epochs=10)
|
| # Note that when using this delayed-build pattern (no input shape specified),
| # the model doesn't have any weights until the first call
| # to a training/evaluation method (since it isn't yet built):
| model = Sequential()
| model.add(Dense(32))
| model.add(Dense(32))
| model.weights # returns []
|
| # Whereas if you specify the input shape, the model gets built continuously
| # as you are adding layers:
| model = Sequential()
| model.add(Dense(32, input_shape=(500,)))
| model.add(Dense(32))
| model.weights # returns list of length 4
|
| # When using the delayed-build pattern (no input shape specified), you can
| # choose to manually build your model by calling `build(batch_input_shape)`:
| model = Sequential()
| model.add(Dense(32))
| model.add(Dense(32))
| model.build((None, 500))
| model.weights # returns list of length 4
| ```
|
| Method resolution order:
| Sequential
| tensorflow.python.keras.engine.training.Model
| tensorflow.python.keras.engine.network.Network
| tensorflow.python.keras.engine.base_layer.Layer
| tensorflow.python.module.module.Module
| tensorflow.python.training.tracking.tracking.AutoTrackable
| tensorflow.python.training.tracking.base.Trackable
| builtins.object
|
| Methods defined here:
|
| __init__(self, layers=None, name=None)
|
| add(self, layer)
| Adds a layer instance on top of the layer stack.
|
| Arguments:
| layer: layer instance.
|
| Raises:
| TypeError: If `layer` is not a layer instance.
| ValueError: In case the `layer` argument does not
| know its input shape.
| ValueError: In case the `layer` argument has
| multiple output tensors, or is already connected
| somewhere else (forbidden in `Sequential` models).
|
| build(self, input_shape=None)
| Builds the model based on input shapes received.
|
| This is to be used for subclassed models, which do not know at instantiation
| time what their inputs look like.
|
| This method only exists for users who want to call `model.build()` in a
| standalone way (as a substitute for calling the model on real data to
| build it). It will never be called by the framework (and thus it will
| never throw unexpected errors in an unrelated workflow).
|
| Args:
| input_shape: Single tuple, TensorShape, or list of shapes, where shapes
| are tuples, integers, or TensorShapes.
|
| Raises:
| ValueError:
| 1. In case of invalid user-provided data (not of type tuple,
| list, or TensorShape).
| 2. If the model requires call arguments that are agnostic
| to the input shapes (positional or kwarg in call signature).
| 3. If not all layers were properly built.
| 4. If float type inputs are not supported within the layers.
|
| In each of these cases, the user should build their model by calling it
| on real tensor data.
|
| call(self, inputs, training=None, mask=None)
| Calls the model on new inputs.
|
| In this case `call` just reapplies
| all ops in the graph to the new inputs
| (e.g. build a new computational graph from the provided inputs).
|
| Arguments:
| inputs: A tensor or list of tensors.
| training: Boolean or boolean scalar tensor, indicating whether to run
| the `Network` in training mode or inference mode.
| mask: A mask or list of masks. A mask can be
| either a tensor or None (no mask).
|
| Returns:
| A tensor if there is a single output, or
| a list of tensors if there are more than one outputs.
|
| compute_mask(self, inputs, mask)
| Computes an output mask tensor.
|
| Arguments:
| inputs: Tensor or list of tensors.
| mask: Tensor or list of tensors.
|
| Returns:
| None or a tensor (or list of tensors,
| one per output tensor of the layer).
|
| compute_output_shape(self, input_shape)
| Computes the output shape of the layer.
|
| If the layer has not been built, this method will call `build` on the
| layer. This assumes that the layer will later be used with inputs that
| match the input shape provided here.
|
| Arguments:
| input_shape: Shape tuple (tuple of integers)
| or list of shape tuples (one per output tensor of the layer).
| Shape tuples can include None for free dimensions,
| instead of an integer.
|
| Returns:
| An input shape tuple.
|
| get_config(self)
| Returns the config of the layer.
|
| A layer config is a Python dictionary (serializable)
| containing the configuration of a layer.
| The same layer can be reinstantiated later
| (without its trained weights) from this configuration.
|
| The config of a layer does not include connectivity
| information, nor the layer class name. These are handled
| by `Network` (one layer of abstraction above).
|
| Returns:
| Python dictionary.
|
| pop(self)
| Removes the last layer in the model.
|
| Raises:
| TypeError: if there are no layers in the model.
|
| predict_classes(self, x, batch_size=32, verbose=0)
| Generate class predictions for the input samples.
|
| The input samples are processed batch by batch.
|
| Arguments:
| x: input data, as a Numpy array or list of Numpy arrays
| (if the model has multiple inputs).
| batch_size: integer.
| verbose: verbosity mode, 0 or 1.
|
| Returns:
| A numpy array of class predictions.
|
| predict_proba(self, x, batch_size=32, verbose=0)
| Generates class probability predictions for the input samples.
|
| The input samples are processed batch by batch.
|
| Arguments:
| x: input data, as a Numpy array or list of Numpy arrays
| (if the model has multiple inputs).
| batch_size: integer.
| verbose: verbosity mode, 0 or 1.
|
| Returns:
| A Numpy array of probability predictions.
|
| ----------------------------------------------------------------------
| Class methods defined here:
|
| from_config(config, custom_objects=None) from builtins.type
| Instantiates a Model from its config (output of `get_config()`).
|
| Arguments:
| config: Model config dictionary.
| custom_objects: Optional dictionary mapping names
| (strings) to custom classes or functions to be
| considered during deserialization.
|
| Returns:
| A model instance.
|
| Raises:
| ValueError: In case of improperly formatted config dict.
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| dynamic
|
| input_spec
| Gets the network's input specs.
|
| Returns:
| A list of `InputSpec` instances (one per input to the model)
| or a single instance if the model has only one input.
|
| layers
|
| ----------------------------------------------------------------------
| Methods inherited from tensorflow.python.keras.engine.training.Model:
|
| compile(self, optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, distribute=None, **kwargs)
| Configures the model for training.
|
| Arguments:
| optimizer: String (name of optimizer) or optimizer instance.
| See `tf.keras.optimizers`.
| loss: String (name of objective function), objective function or
| `tf.losses.Loss` instance. See `tf.losses`. If the model has
| multiple outputs, you can use a different loss on each output by
| passing a dictionary or a list of losses. The loss value that will
| be minimized by the model will then be the sum of all individual
| losses.
| metrics: List of metrics to be evaluated by the model during training
| and testing. Typically you will use `metrics=['accuracy']`.
| To specify different metrics for different outputs of a
| multi-output model, you could also pass a dictionary, such as
| `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.
| You can also pass a list (len = len(outputs)) of lists of metrics
| such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or
| `metrics=['accuracy', ['accuracy', 'mse']]`.
| loss_weights: Optional list or dictionary specifying scalar
| coefficients (Python floats) to weight the loss contributions
| of different model outputs.
| The loss value that will be minimized by the model
| will then be the *weighted sum* of all individual losses,
| weighted by the `loss_weights` coefficients.
| If a list, it is expected to have a 1:1 mapping
| to the model's outputs. If a tensor, it is expected to map
| output names (strings) to scalar coefficients.
| sample_weight_mode: If you need to do timestep-wise
| sample weighting (2D weights), set this to `"temporal"`.
| `None` defaults to sample-wise weights (1D).
| If the model has multiple outputs, you can use a different
| `sample_weight_mode` on each output by passing a
| dictionary or a list of modes.
| weighted_metrics: List of metrics to be evaluated and weighted
| by sample_weight or class_weight during training and testing.
| target_tensors: By default, Keras will create placeholders for the
| model's target, which will be fed with the target data during
| training. If instead you would like to use your own
| target tensors (in turn, Keras will not expect external
| Numpy data for these targets at training time), you
| can specify them via the `target_tensors` argument. It can be
| a single tensor (for a single-output model), a list of tensors,
| or a dict mapping output names to target tensors.
| distribute: NOT SUPPORTED IN TF 2.0, please create and compile the
| model under distribution strategy scope instead of passing it to
| compile.
| **kwargs: Any additional arguments.
|
| Raises:
| ValueError: In case of invalid arguments for
| `optimizer`, `loss`, `metrics` or `sample_weight_mode`.
|
| evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
| Returns the loss value & metrics values for the model in test mode.
|
| Computation is done in batches.
|
| Arguments:
| x: Input data. It could be:
| - A Numpy array (or array-like), or a list of arrays
| (in case the model has multiple inputs).
| - A TensorFlow tensor, or a list of tensors
| (in case the model has multiple inputs).
| - A dict mapping input names to the corresponding array/tensors,
| if the model has named inputs.
| - A `tf.data` dataset.
| - A generator or `keras.utils.Sequence` instance.
| y: Target data. Like the input data `x`,
| it could be either Numpy array(s) or TensorFlow tensor(s).
| It should be consistent with `x` (you cannot have Numpy inputs and
| tensor targets, or inversely).
| If `x` is a dataset, generator or
| `keras.utils.Sequence` instance, `y` should not be specified (since
| targets will be obtained from the iterator/dataset).
| batch_size: Integer or `None`.
| Number of samples per gradient update.
| If unspecified, `batch_size` will default to 32.
| Do not specify the `batch_size` is your data is in the
| form of symbolic tensors, dataset,
| generators, or `keras.utils.Sequence` instances (since they generate
| batches).
| verbose: 0 or 1. Verbosity mode.
| 0 = silent, 1 = progress bar.
| sample_weight: Optional Numpy array of weights for
| the test samples, used for weighting the loss function.
| You can either pass a flat (1D)
| Numpy array with the same length as the input samples
| (1:1 mapping between weights and samples),
| or in the case of temporal data,
| you can pass a 2D array with shape
| `(samples, sequence_length)`,
| to apply a different weight to every timestep of every sample.
| In this case you should make sure to specify
| `sample_weight_mode="temporal"` in `compile()`. This argument is not
| supported when `x` is a dataset, instead pass
| sample weights as the third element of `x`.
| steps: Integer or `None`.
| Total number of steps (batches of samples)
| before declaring the evaluation round finished.
| Ignored with the default value of `None`.
| If x is a `tf.data` dataset and `steps` is
| None, 'evaluate' will run until the dataset is exhausted.
| This argument is not supported with array inputs.
| callbacks: List of `keras.callbacks.Callback` instances.
| List of callbacks to apply during evaluation.
| See [callbacks](/api_docs/python/tf/keras/callbacks).
| max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
| input only. Maximum size for the generator queue.
| If unspecified, `max_queue_size` will default to 10.
| workers: Integer. Used for generator or `keras.utils.Sequence` input
| only. Maximum number of processes to spin up when using
| process-based threading. If unspecified, `workers` will default
| to 1. If 0, will execute the generator on the main thread.
| use_multiprocessing: Boolean. Used for generator or
| `keras.utils.Sequence` input only. If `True`, use process-based
| threading. If unspecified, `use_multiprocessing` will default to
| `False`. Note that because this implementation relies on
| multiprocessing, you should not pass non-picklable arguments to
| the generator as they can't be passed easily to children processes.
|
| Returns:
| Scalar test loss (if the model has a single output and no metrics)
| or list of scalars (if the model has multiple outputs
| and/or metrics). The attribute `model.metrics_names` will give you
| the display labels for the scalar outputs.
|
| Raises:
| ValueError: in case of invalid arguments.
|
| evaluate_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
| Evaluates the model on a data generator.
|
| The generator should return the same kind of data
| as accepted by `test_on_batch`.
|
| Arguments:
| generator: Generator yielding tuples (inputs, targets)
| or (inputs, targets, sample_weights)
| or an instance of `keras.utils.Sequence`
| object in order to avoid duplicate data
| when using multiprocessing.
| steps: Total number of steps (batches of samples)
| to yield from `generator` before stopping.
| Optional for `Sequence`: if unspecified, will use
| the `len(generator)` as a number of steps.
| callbacks: List of `keras.callbacks.Callback` instances.
| List of callbacks to apply during evaluation.
| See [callbacks](/api_docs/python/tf/keras/callbacks).
| max_queue_size: maximum size for the generator queue
| workers: Integer. Maximum number of processes to spin up
| when using process-based threading.
| If unspecified, `workers` will default to 1. If 0, will
| execute the generator on the main thread.
| use_multiprocessing: Boolean.
| If `True`, use process-based threading.
| If unspecified, `use_multiprocessing` will default to `False`.
| Note that because this implementation relies on multiprocessing,
| you should not pass non-picklable arguments to the generator
| as they can't be passed easily to children processes.
| verbose: Verbosity mode, 0 or 1.
|
| Returns:
| Scalar test loss (if the model has a single output and no metrics)
| or list of scalars (if the model has multiple outputs
| and/or metrics). The attribute `model.metrics_names` will give you
| the display labels for the scalar outputs.
|
| Raises:
| ValueError: in case of invalid arguments.
|
| Raises:
| ValueError: In case the generator yields data in an invalid format.
|
| fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False, **kwargs)
| Trains the model for a fixed number of epochs (iterations on a dataset).
|
| Arguments:
| x: Input data. It could be:
| - A Numpy array (or array-like), or a list of arrays
| (in case the model has multiple inputs).
| - A TensorFlow tensor, or a list of tensors
| (in case the model has multiple inputs).
| - A dict mapping input names to the corresponding array/tensors,
| if the model has named inputs.
| - A `tf.data` dataset. Should return a tuple
| of either `(inputs, targets)` or
| `(inputs, targets, sample_weights)`.
| - A generator or `keras.utils.Sequence` returning `(inputs, targets)`
| or `(inputs, targets, sample weights)`.
| y: Target data. Like the input data `x`,
| it could be either Numpy array(s) or TensorFlow tensor(s).
| It should be consistent with `x` (you cannot have Numpy inputs and
| tensor targets, or inversely). If `x` is a dataset, generator,
| or `keras.utils.Sequence` instance, `y` should
| not be specified (since targets will be obtained from `x`).
| batch_size: Integer or `None`.
| Number of samples per gradient update.
| If unspecified, `batch_size` will default to 32.
| Do not specify the `batch_size` if your data is in the
| form of symbolic tensors, datasets,
| generators, or `keras.utils.Sequence` instances (since they generate
| batches).
| epochs: Integer. Number of epochs to train the model.
| An epoch is an iteration over the entire `x` and `y`
| data provided.
| Note that in conjunction with `initial_epoch`,
| `epochs` is to be understood as "final epoch".
| The model is not trained for a number of iterations
| given by `epochs`, but merely until the epoch
| of index `epochs` is reached.
| verbose: 0, 1, or 2. Verbosity mode.
| 0 = silent, 1 = progress bar, 2 = one line per epoch.
| Note that the progress bar is not particularly useful when
| logged to a file, so verbose=2 is recommended when not running
| interactively (eg, in a production environment).
| callbacks: List of `keras.callbacks.Callback` instances.
| List of callbacks to apply during training.
| See `tf.keras.callbacks`.
| validation_split: Float between 0 and 1.
| Fraction of the training data to be used as validation data.
| The model will set apart this fraction of the training data,
| will not train on it, and will evaluate
| the loss and any model metrics
| on this data at the end of each epoch.
| The validation data is selected from the last samples
| in the `x` and `y` data provided, before shuffling. This argument is
| not supported when `x` is a dataset, generator or
| `keras.utils.Sequence` instance.
| validation_data: Data on which to evaluate
| the loss and any model metrics at the end of each epoch.
| The model will not be trained on this data.
| `validation_data` will override `validation_split`.
| `validation_data` could be:
| - tuple `(x_val, y_val)` of Numpy arrays or tensors
| - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
| - dataset
| For the first two cases, `batch_size` must be provided.
| For the last case, `validation_steps` must be provided.
| shuffle: Boolean (whether to shuffle the training data
| before each epoch) or str (for 'batch').
| 'batch' is a special option for dealing with the
| limitations of HDF5 data; it shuffles in batch-sized chunks.
| Has no effect when `steps_per_epoch` is not `None`.
| class_weight: Optional dictionary mapping class indices (integers)
| to a weight (float) value, used for weighting the loss function
| (during training only).
| This can be useful to tell the model to
| "pay more attention" to samples from
| an under-represented class.
| sample_weight: Optional Numpy array of weights for
| the training samples, used for weighting the loss function
| (during training only). You can either pass a flat (1D)
| Numpy array with the same length as the input samples
| (1:1 mapping between weights and samples),
| or in the case of temporal data,
| you can pass a 2D array with shape
| `(samples, sequence_length)`,
| to apply a different weight to every timestep of every sample.
| In this case you should make sure to specify
| `sample_weight_mode="temporal"` in `compile()`. This argument is not
| supported when `x` is a dataset, generator, or
| `keras.utils.Sequence` instance, instead provide the sample_weights
| as the third element of `x`.
| initial_epoch: Integer.
| Epoch at which to start training
| (useful for resuming a previous training run).
| steps_per_epoch: Integer or `None`.
| Total number of steps (batches of samples)
| before declaring one epoch finished and starting the
| next epoch. When training with input tensors such as
| TensorFlow data tensors, the default `None` is equal to
| the number of samples in your dataset divided by
| the batch size, or 1 if that cannot be determined. If x is a
| `tf.data` dataset, and 'steps_per_epoch'
| is None, the epoch will run until the input dataset is exhausted.
| This argument is not supported with array inputs.
| validation_steps: Only relevant if `validation_data` is provided and
| is a `tf.data` dataset. Total number of steps (batches of
| samples) to draw before stopping when performing validation
| at the end of every epoch. If validation_data is a `tf.data` dataset
| and 'validation_steps' is None, validation
| will run until the `validation_data` dataset is exhausted.
| validation_freq: Only relevant if validation data is provided. Integer
| or `collections_abc.Container` instance (e.g. list, tuple, etc.).
| If an integer, specifies how many training epochs to run before a
| new validation run is performed, e.g. `validation_freq=2` runs
| validation every 2 epochs. If a Container, specifies the epochs on
| which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
| validation at the end of the 1st, 2nd, and 10th epochs.
| max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
| input only. Maximum size for the generator queue.
| If unspecified, `max_queue_size` will default to 10.
| workers: Integer. Used for generator or `keras.utils.Sequence` input
| only. Maximum number of processes to spin up
| when using process-based threading. If unspecified, `workers`
| will default to 1. If 0, will execute the generator on the main
| thread.
| use_multiprocessing: Boolean. Used for generator or
| `keras.utils.Sequence` input only. If `True`, use process-based
| threading. If unspecified, `use_multiprocessing` will default to
| `False`. Note that because this implementation relies on
| multiprocessing, you should not pass non-picklable arguments to
| the generator as they can't be passed easily to children processes.
| **kwargs: Used for backwards compatibility.
|
| Returns:
| A `History` object. Its `History.history` attribute is
| a record of training loss values and metrics values
| at successive epochs, as well as validation loss values
| and validation metrics values (if applicable).
|
| Raises:
| RuntimeError: If the model was never compiled.
| ValueError: In case of mismatch between the provided input data
| and what the model expects.
|
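As a quick, editor-added illustration of the `fit()` arguments documented above, here is a minimal self-contained sketch; the toy arrays, layer sizes, batch size and epoch count are assumptions for illustration only, not values from this notebook.

```python
import numpy as np
import tensorflow as tf

# Toy data purely for illustration: 100 samples, 4 features, 1 regression target.
X = np.random.rand(100, 4)
y = np.random.rand(100, 1)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu'),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='rmsprop', loss='mse')

history = model.fit(
    X, y,
    batch_size=32,         # samples per gradient update (defaults to 32 if unspecified)
    epochs=5,              # passes over the entire dataset
    validation_split=0.2,  # hold out the last 20% of X/y for end-of-epoch evaluation
    verbose=2)             # one line per epoch; useful when not running interactively

print(history.history.keys())  # typically includes 'loss' and 'val_loss'
```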
| fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)
| Fits the model on data yielded batch-by-batch by a Python generator.
|
| The generator is run in parallel to the model, for efficiency.
| For instance, this allows you to do real-time data augmentation
| on images on CPU in parallel to training your model on GPU.
|
| The use of `keras.utils.Sequence` guarantees the ordering
| and guarantees the single use of every input per epoch when
| using `use_multiprocessing=True`.
|
| Arguments:
| generator: A generator or an instance of `Sequence`
| (`keras.utils.Sequence`)
| object in order to avoid duplicate data
| when using multiprocessing.
| The output of the generator must be either
| - a tuple `(inputs, targets)`
| - a tuple `(inputs, targets, sample_weights)`.
| This tuple (a single output of the generator) makes a single batch.
| Therefore, all arrays in this tuple must have the same length (equal
| to the size of this batch). Different batches may have different
| sizes.
| For example, the last batch of the epoch is commonly smaller than the
| others, if the size of the dataset is not divisible by the batch size.
| The generator is expected to loop over its data
| indefinitely. An epoch finishes when `steps_per_epoch`
| batches have been seen by the model.
| steps_per_epoch: Total number of steps (batches of samples)
| to yield from `generator` before declaring one epoch
| finished and starting the next epoch. It should typically
| be equal to the number of samples of your dataset
| divided by the batch size.
| Optional for `Sequence`: if unspecified, will use
| the `len(generator)` as a number of steps.
| epochs: Integer, total number of iterations on the data.
| verbose: Verbosity mode, 0, 1, or 2.
| callbacks: List of callbacks to be called during training.
| validation_data: This can be either
| - a generator for the validation data
| - a tuple (inputs, targets)
| - a tuple (inputs, targets, sample_weights).
| validation_steps: Only relevant if `validation_data`
| is a generator. Total number of steps (batches of samples)
| to yield from `generator` before stopping.
| Optional for `Sequence`: if unspecified, will use
| the `len(validation_data)` as a number of steps.
| validation_freq: Only relevant if validation data is provided. Integer
| or `collections_abc.Container` instance (e.g. list, tuple, etc.).
| If an integer, specifies how many training epochs to run before a
| new validation run is performed, e.g. `validation_freq=2` runs
| validation every 2 epochs. If a Container, specifies the epochs on
| which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
| validation at the end of the 1st, 2nd, and 10th epochs.
| class_weight: Dictionary mapping class indices to a weight
| for the class.
| max_queue_size: Integer. Maximum size for the generator queue.
| If unspecified, `max_queue_size` will default to 10.
| workers: Integer. Maximum number of processes to spin up
| when using process-based threading.
| If unspecified, `workers` will default to 1. If 0, will
| execute the generator on the main thread.
| use_multiprocessing: Boolean.
| If `True`, use process-based threading.
| If unspecified, `use_multiprocessing` will default to `False`.
| Note that because this implementation relies on multiprocessing,
| you should not pass non-picklable arguments to the generator
| as they can't be passed easily to children processes.
| shuffle: Boolean. Whether to shuffle the order of the batches at
| the beginning of each epoch. Only used with instances
| of `Sequence` (`keras.utils.Sequence`).
| Has no effect when `steps_per_epoch` is not `None`.
| initial_epoch: Epoch at which to start training
| (useful for resuming a previous training run)
|
| Returns:
| A `History` object.
|
| Example:
|
| ```python
| def generate_arrays_from_file(path):
| while 1:
| f = open(path)
| for line in f:
| # create numpy arrays of input data
| # and labels, from each line in the file
| x1, x2, y = process_line(line)
| yield ({'input_1': x1, 'input_2': x2}, {'output': y})
| f.close()
|
| model.fit_generator(generate_arrays_from_file('/my_file.txt'),
| steps_per_epoch=10000, epochs=10)
| ```
| Raises:
| ValueError: In case the generator yields data in an invalid format.
|
| get_weights(self)
| Retrieves the weights of the model.
|
| Returns:
| A flat list of Numpy arrays.
|
| load_weights(self, filepath, by_name=False)
| Loads all layer weights, either from a TensorFlow or an HDF5 file.
|
| predict(self, x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
| Generates output predictions for the input samples.
|
| Computation is done in batches.
|
| Arguments:
| x: Input samples. It could be:
| - A Numpy array (or array-like), or a list of arrays
| (in case the model has multiple inputs).
| - A TensorFlow tensor, or a list of tensors
| (in case the model has multiple inputs).
| - A `tf.data` dataset.
| - A generator or `keras.utils.Sequence` instance.
| batch_size: Integer or `None`.
| Number of samples per gradient update.
| If unspecified, `batch_size` will default to 32.
| Do not specify the `batch_size` if your data is in the
| form of symbolic tensors, datasets,
| generators, or `keras.utils.Sequence` instances (since they generate
| batches).
| verbose: Verbosity mode, 0 or 1.
| steps: Total number of steps (batches of samples)
| before declaring the prediction round finished.
| Ignored with the default value of `None`. If x is a `tf.data`
| dataset and `steps` is None, `predict` will
| run until the input dataset is exhausted.
| callbacks: List of `keras.callbacks.Callback` instances.
| List of callbacks to apply during prediction.
| See [callbacks](/api_docs/python/tf/keras/callbacks).
| max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
| input only. Maximum size for the generator queue.
| If unspecified, `max_queue_size` will default to 10.
| workers: Integer. Used for generator or `keras.utils.Sequence` input
| only. Maximum number of processes to spin up when using
| process-based threading. If unspecified, `workers` will default
| to 1. If 0, will execute the generator on the main thread.
| use_multiprocessing: Boolean. Used for generator or
| `keras.utils.Sequence` input only. If `True`, use process-based
| threading. If unspecified, `use_multiprocessing` will default to
| `False`. Note that because this implementation relies on
| multiprocessing, you should not pass non-picklable arguments to
| the generator as they can't be passed easily to children processes.
|
|
| Returns:
| Numpy array(s) of predictions.
|
| Raises:
| ValueError: In case of mismatch between the provided
| input data and the model's expectations,
| or in case a stateful model receives a number of samples
| that is not a multiple of the batch size.
|
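In the same spirit, a minimal sketch of `predict()` on array input; the shapes below are assumptions chosen only to make the example runnable:

```python
import numpy as np
import tensorflow as tf

# Giving the first layer an input_shape builds the model before predicting.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])

preds = model.predict(np.random.rand(10, 4), batch_size=4, verbose=0)
print(preds.shape)  # (10, 1): one prediction per input sample
```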
| predict_generator(self, generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
| Generates predictions for the input samples from a data generator.
|
| The generator should return the same kind of data as accepted by
| `predict_on_batch`.
|
| Arguments:
| generator: Generator yielding batches of input samples
| or an instance of `keras.utils.Sequence` object in order to
| avoid duplicate data when using multiprocessing.
| steps: Total number of steps (batches of samples)
| to yield from `generator` before stopping.
| Optional for `Sequence`: if unspecified, will use
| the `len(generator)` as a number of steps.
| callbacks: List of `keras.callbacks.Callback` instances.
| List of callbacks to apply during prediction.
| See [callbacks](/api_docs/python/tf/keras/callbacks).
| max_queue_size: Maximum size for the generator queue.
| workers: Integer. Maximum number of processes to spin up
| when using process-based threading.
| If unspecified, `workers` will default to 1. If 0, will
| execute the generator on the main thread.
| use_multiprocessing: Boolean.
| If `True`, use process-based threading.
| If unspecified, `use_multiprocessing` will default to `False`.
| Note that because this implementation relies on multiprocessing,
| you should not pass non-picklable arguments to the generator
| as they can't be passed easily to children processes.
| verbose: verbosity mode, 0 or 1.
|
| Returns:
| Numpy array(s) of predictions.
|
| Raises:
| ValueError: In case the generator yields data in an invalid format.
|
| predict_on_batch(self, x)
| Returns predictions for a single batch of samples.
|
| Arguments:
| x: Input data. It could be:
| - A Numpy array (or array-like), or a list of arrays
| (in case the model has multiple inputs).
| - A TensorFlow tensor, or a list of tensors
| (in case the model has multiple inputs).
| - A `tf.data` dataset.
|
| Returns:
| Numpy array(s) of predictions.
|
| Raises:
| ValueError: In case of mismatch between given number of inputs and
| expectations of the model.
|
| reset_metrics(self)
| Resets the state of metrics.
|
| test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True)
| Test the model on a single batch of samples.
|
| Arguments:
| x: Input data. It could be:
| - A Numpy array (or array-like), or a list of arrays
| (in case the model has multiple inputs).
| - A TensorFlow tensor, or a list of tensors
| (in case the model has multiple inputs).
| - A dict mapping input names to the corresponding array/tensors,
| if the model has named inputs.
| - A `tf.data` dataset.
| y: Target data. Like the input data `x`,
| it could be either Numpy array(s) or TensorFlow tensor(s).
| It should be consistent with `x` (you cannot have Numpy inputs and
| tensor targets, or inversely). If `x` is a dataset, `y` should
| not be specified (since targets will be obtained from the iterator).
| sample_weight: Optional array of the same length as x, containing
| weights to apply to the model's loss for each sample.
| In the case of temporal data, you can pass a 2D array
| with shape (samples, sequence_length),
| to apply a different weight to every timestep of every sample.
| In this case you should make sure to specify
| sample_weight_mode="temporal" in compile(). This argument is not
| supported when `x` is a dataset.
| reset_metrics: If `True`, the metrics returned will be only for this
| batch. If `False`, the metrics will be statefully accumulated across
| batches.
|
| Returns:
| Scalar test loss (if the model has a single output and no metrics)
| or list of scalars (if the model has multiple outputs
| and/or metrics). The attribute `model.metrics_names` will give you
| the display labels for the scalar outputs.
|
| Raises:
| ValueError: In case of invalid user-provided arguments.
|
| train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, reset_metrics=True)
| Runs a single gradient update on a single batch of data.
|
| Arguments:
| x: Input data. It could be:
| - A Numpy array (or array-like), or a list of arrays
| (in case the model has multiple inputs).
| - A TensorFlow tensor, or a list of tensors
| (in case the model has multiple inputs).
| - A dict mapping input names to the corresponding array/tensors,
| if the model has named inputs.
| - A `tf.data` dataset.
| y: Target data. Like the input data `x`, it could be either Numpy
| array(s) or TensorFlow tensor(s). It should be consistent with `x`
| (you cannot have Numpy inputs and tensor targets, or inversely). If
| `x` is a dataset, `y` should not be specified
| (since targets will be obtained from the iterator).
| sample_weight: Optional array of the same length as x, containing
| weights to apply to the model's loss for each sample. In the case of
| temporal data, you can pass a 2D array with shape (samples,
| sequence_length), to apply a different weight to every timestep of
| every sample. In this case you should make sure to specify
| sample_weight_mode="temporal" in compile(). This argument is not
| supported when `x` is a dataset.
| class_weight: Optional dictionary mapping class indices (integers) to a
| weight (float) to apply to the model's loss for the samples from this
| class during training. This can be useful to tell the model to "pay
| more attention" to samples from an under-represented class.
| reset_metrics: If `True`, the metrics returned will be only for this
| batch. If `False`, the metrics will be statefully accumulated across
| batches.
|
| Returns:
| Scalar training loss
| (if the model has a single output and no metrics)
| or list of scalars (if the model has multiple outputs
| and/or metrics). The attribute `model.metrics_names` will give you
| the display labels for the scalar outputs.
|
| Raises:
| ValueError: In case of invalid user-provided arguments.
|
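For completeness, a sketch of a manual training loop built on `train_on_batch`; the batch size and random data are illustrative assumptions:

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='rmsprop', loss='mse')

X, y = np.random.rand(64, 4), np.random.rand(64, 1)
for start in range(0, len(X), 16):           # iterate over batches of 16 samples
    xb, yb = X[start:start + 16], y[start:start + 16]
    loss = model.train_on_batch(xb, yb)      # one gradient update per batch
print('loss on the last batch:', loss)
```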
| ----------------------------------------------------------------------
| Data descriptors inherited from tensorflow.python.keras.engine.training.Model:
|
| metrics
| Returns the model's metrics added using `compile`, `add_metric` APIs.
|
| metrics_names
| Returns the model's display labels for all outputs.
|
| run_eagerly
| Settable attribute indicating whether the model should run eagerly.
|
| Running eagerly means that your model will be run step by step,
| like Python code. Your model might run slower, but it should become easier
| for you to debug it by stepping into individual layer calls.
|
| By default, we will attempt to compile your model to a static graph to
| deliver the best execution performance.
|
| Returns:
| Boolean, whether the model should run eagerly.
|
| sample_weights
|
| ----------------------------------------------------------------------
| Methods inherited from tensorflow.python.keras.engine.network.Network:
|
| __setattr__(self, name, value)
| Support self.foo = trackable syntax.
|
| get_layer(self, name=None, index=None)
| Retrieves a layer based on either its name (unique) or index.
|
| If `name` and `index` are both provided, `index` will take precedence.
| Indices are based on order of horizontal graph traversal (bottom-up).
|
| Arguments:
| name: String, name of layer.
| index: Integer, index of layer.
|
| Returns:
| A layer instance.
|
| Raises:
| ValueError: In case of invalid layer name or index.
|
| reset_states(self)
|
| save(self, filepath, overwrite=True, include_optimizer=True, save_format=None, signatures=None, options=None)
| Saves the model to Tensorflow SavedModel or a single HDF5 file.
|
| The savefile includes:
| - The model architecture, allowing to re-instantiate the model.
| - The model weights.
| - The state of the optimizer, allowing to resume training
| exactly where you left off.
|
| This allows you to save the entirety of the state of a model
| in a single file.
|
| Saved models can be reinstantiated via `keras.models.load_model`.
| The model returned by `load_model`
| is a compiled model ready to be used (unless the saved model
| was never compiled in the first place).
|
| Arguments:
| filepath: String, path to SavedModel or H5 file to save the model.
| overwrite: Whether to silently overwrite any existing file at the
| target location, or provide the user with a manual prompt.
| include_optimizer: If True, save optimizer's state together.
| save_format: Either 'tf' or 'h5', indicating whether to save the model
| to Tensorflow SavedModel or HDF5. The default is currently 'h5', but
| will switch to 'tf' in TensorFlow 2.0. The 'tf' option is currently
| disabled (use `tf.keras.experimental.export_saved_model` instead).
| signatures: Signatures to save with the SavedModel. Applicable to the 'tf'
| format only. Please see the `signatures` argument in
| `tf.saved_model.save` for details.
| options: Optional `tf.saved_model.SaveOptions` object that specifies
| options for saving to SavedModel.
|
| Example:
|
| ```python
| from keras.models import load_model
|
| model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
| del model # deletes the existing model
|
| # returns a compiled model
| # identical to the previous one
| model = load_model('my_model.h5')
| ```
|
| save_weights(self, filepath, overwrite=True, save_format=None)
| Saves all layer weights.
|
| Either saves in HDF5 or in TensorFlow format based on the `save_format`
| argument.
|
| When saving in HDF5 format, the weight file has:
| - `layer_names` (attribute), a list of strings
| (ordered names of model layers).
| - For every layer, a `group` named `layer.name`
| - For every such layer group, a group attribute `weight_names`,
| a list of strings
| (ordered names of weights tensor of the layer).
| - For every weight in the layer, a dataset
| storing the weight value, named after the weight tensor.
|
| When saving in TensorFlow format, all objects referenced by the network are
| saved in the same format as `tf.train.Checkpoint`, including any `Layer`
| instances or `Optimizer` instances assigned to object attributes. For
| networks constructed from inputs and outputs using `tf.keras.Model(inputs,
| outputs)`, `Layer` instances used by the network are tracked/saved
| automatically. For user-defined classes which inherit from `tf.keras.Model`,
| `Layer` instances must be assigned to object attributes, typically in the
| constructor. See the documentation of `tf.train.Checkpoint` and
| `tf.keras.Model` for details.
|
| While the formats are the same, do not mix `save_weights` and
| `tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be
| loaded using `Model.load_weights`. Checkpoints saved using
| `tf.train.Checkpoint.save` should be restored using the corresponding
| `tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over
| `save_weights` for training checkpoints.
|
| The TensorFlow format matches objects and variables by starting at a root
| object, `self` for `save_weights`, and greedily matching attribute
| names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this
| is the `Checkpoint` even if the `Checkpoint` has a model attached. This
| means saving a `tf.keras.Model` using `save_weights` and loading into a
| `tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match
| the `Model`'s variables. See the [guide to training
| checkpoints](https://www.tensorflow.org/alpha/guide/checkpoints) for details
| on the TensorFlow format.
|
| Arguments:
| filepath: String, path to the file to save the weights to. When saving
| in TensorFlow format, this is the prefix used for checkpoint files
| (multiple files are generated). Note that the '.h5' suffix causes
| weights to be saved in HDF5 format.
| overwrite: Whether to silently overwrite any existing file at the
| target location, or provide the user with a manual prompt.
| save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
| '.keras' will default to HDF5 if `save_format` is `None`. Otherwise
| `None` defaults to 'tf'.
|
| Raises:
| ImportError: If h5py is not available when attempting to save in HDF5
| format.
| ValueError: For invalid/unknown format arguments.
|
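A short sketch contrasting the two weight formats described above; the file names are placeholders, and the HDF5 path only works if h5py is installed:

```python
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])

model.save_weights('my_checkpoint')   # TensorFlow format: prefix for several checkpoint files
model.save_weights('my_weights.h5')   # '.h5' suffix switches to a single HDF5 file
model.load_weights('my_weights.h5')   # weights are matched back by layer/weight names
```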
| summary(self, line_length=None, positions=None, print_fn=None)
| Prints a string summary of the network.
|
| Arguments:
| line_length: Total length of printed lines
| (e.g. set this to adapt the display to different
| terminal window sizes).
| positions: Relative or absolute positions of log elements
| in each line. If not provided,
| defaults to `[.33, .55, .67, 1.]`.
| print_fn: Print function to use. Defaults to `print`.
| It will be called on each line of the summary.
| You can set it to a custom function
| in order to capture the string summary.
|
| Raises:
| ValueError: if `summary()` is called before the model is built.
|
| to_json(self, **kwargs)
| Returns a JSON string containing the network configuration.
|
| To load a network from a JSON save file, use
| `keras.models.model_from_json(json_string, custom_objects={})`.
|
| Arguments:
| **kwargs: Additional keyword arguments
| to be passed to `json.dumps()`.
|
| Returns:
| A JSON string.
|
| to_yaml(self, **kwargs)
| Returns a yaml string containing the network configuration.
|
| To load a network from a yaml save file, use
| `keras.models.model_from_yaml(yaml_string, custom_objects={})`.
|
| `custom_objects` should be a dictionary mapping
| the names of custom losses / layers / etc to the corresponding
| functions / classes.
|
| Arguments:
| **kwargs: Additional keyword arguments
| to be passed to `yaml.dump()`.
|
| Returns:
| A YAML string.
|
| Raises:
| ImportError: if yaml module is not found.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from tensorflow.python.keras.engine.network.Network:
|
| non_trainable_weights
|
| state_updates
| Returns the `updates` from all layers that are stateful.
|
| This is useful for separating training updates and
| state updates, e.g. when we need to update a layer's internal state
| during prediction.
|
| Returns:
| A list of update ops.
|
| stateful
|
| trainable_weights
|
| weights
| Returns the list of all layer variables/weights.
|
| Returns:
| A list of variables.
|
| ----------------------------------------------------------------------
| Methods inherited from tensorflow.python.keras.engine.base_layer.Layer:
|
| __call__(self, inputs, *args, **kwargs)
| Wraps `call`, applying pre- and post-processing steps.
|
| Arguments:
| inputs: input tensor(s).
| *args: additional positional arguments to be passed to `self.call`.
| **kwargs: additional keyword arguments to be passed to `self.call`.
|
| Returns:
| Output tensor(s).
|
| Note:
| - The following optional keyword arguments are reserved for specific uses:
| * `training`: Boolean scalar tensor of Python boolean indicating
| whether the `call` is meant for training or inference.
| * `mask`: Boolean input mask.
| - If the layer's `call` method takes a `mask` argument (as some Keras
| layers do), its default value will be set to the mask generated
| for `inputs` by the previous layer (if `input` did come from
| a layer that generated a corresponding mask, i.e. if it came from
| a Keras layer with masking support).
|
| Raises:
| ValueError: if the layer's `call` method returns None (an invalid value).
|
| __delattr__(self, name)
| Implement delattr(self, name).
|
| add_loss(self, losses, inputs=None)
| Add loss tensor(s), potentially dependent on layer inputs.
|
| Some losses (for instance, activity regularization losses) may be dependent
| on the inputs passed when calling a layer. Hence, when reusing the same
| layer on different inputs `a` and `b`, some entries in `layer.losses` may
| be dependent on `a` and some on `b`. This method automatically keeps track
| of dependencies.
|
| This method can be used inside a subclassed layer or model's `call`
| function, in which case `losses` should be a Tensor or list of Tensors.
|
| Example:
|
| ```python
| class MyLayer(tf.keras.layers.Layer):
| def call(self, inputs):
| self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
| return inputs
| ```
|
| This method can also be called directly on a Functional Model during
| construction. In this case, any loss Tensors passed to this Model must
| be symbolic and be able to be traced back to the model's `Input`s. These
| losses become part of the model's topology and are tracked in `get_config`.
|
| Example:
|
| ```python
| inputs = tf.keras.Input(shape=(10,))
| x = tf.keras.layers.Dense(10)(inputs)
| outputs = tf.keras.layers.Dense(1)(x)
| model = tf.keras.Model(inputs, outputs)
| # Activity regularization.
| model.add_loss(tf.abs(tf.reduce_mean(x)))
| ```
|
| If this is not the case for your loss (if, for example, your loss references
| a `Variable` of one of the model's layers), you can wrap your loss in a
| zero-argument lambda. These losses are not tracked as part of the model's
| topology since they can't be serialized.
|
| Example:
|
| ```python
| inputs = tf.keras.Input(shape=(10,))
| x = tf.keras.layers.Dense(10)(inputs)
| outputs = tf.keras.layers.Dense(1)(x)
| model = tf.keras.Model(inputs, outputs)
| # Weight regularization.
| model.add_loss(lambda: tf.reduce_mean(x.kernel))
| ```
|
| The `get_losses_for` method allows you to retrieve the losses relevant to a
| specific set of inputs.
|
| Arguments:
| losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
| may also be zero-argument callables which create a loss tensor.
| inputs: Ignored when executing eagerly. If anything other than None is
| passed, it signals the losses are conditional on some of the layer's
| inputs, and thus they should only be run where these inputs are
| available. This is the case for activity regularization losses, for
| instance. If `None` is passed, the losses are assumed
| to be unconditional, and will apply across all dataflows of the layer
| (e.g. weight regularization losses).
|
| add_metric(self, value, aggregation=None, name=None)
| Adds metric tensor to the layer.
|
| Args:
| value: Metric tensor.
| aggregation: Sample-wise metric reduction function. If `aggregation=None`,
| it indicates that the metric tensor provided has been aggregated
| already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by
| `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the
| given metric tensor will be sample-wise reduced using `mean` function.
| eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
| aggregation='mean')`.
| name: String metric name.
|
| Raises:
| ValueError: If `aggregation` is anything other than None or `mean`.
|
| add_update(self, updates, inputs=None)
| Add update op(s), potentially dependent on layer inputs. (deprecated arguments)
|
| Warning: SOME ARGUMENTS ARE DEPRECATED: `(inputs)`. They will be removed in a future version.
| Instructions for updating:
| `inputs` is now automatically inferred
|
| Weight updates (for instance, the updates of the moving mean and variance
| in a BatchNormalization layer) may be dependent on the inputs passed
| when calling a layer. Hence, when reusing the same layer on
| different inputs `a` and `b`, some entries in `layer.updates` may be
| dependent on `a` and some on `b`. This method automatically keeps track
| of dependencies.
|
| The `get_updates_for` method allows you to retrieve the updates relevant to a
| specific set of inputs.
|
| This call is ignored when eager execution is enabled (in that case, variable
| updates are run on the fly and thus do not need to be tracked for later
| execution).
|
| Arguments:
| updates: Update op, or list/tuple of update ops, or zero-arg callable
| that returns an update op. A zero-arg callable should be passed in
| order to disable running the updates by setting `trainable=False`
| on this Layer, when executing in Eager mode.
| inputs: Deprecated, will be automatically inferred.
|
| add_variable(self, *args, **kwargs)
| Deprecated, do NOT use! Alias for `add_weight`. (deprecated)
|
| Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.
| Instructions for updating:
| Please use `layer.add_weight` method instead.
|
| add_weight(self, name=None, shape=None, dtype=None, initializer=None, regularizer=None, trainable=None, constraint=None, partitioner=None, use_resource=None, synchronization=<VariableSynchronization.AUTO: 0>, aggregation=<VariableAggregation.NONE: 0>, **kwargs)
| Adds a new variable to the layer.
|
| Arguments:
| name: Variable name.
| shape: Variable shape. Defaults to scalar if unspecified.
| dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
| initializer: Initializer instance (callable).
| regularizer: Regularizer instance (callable).
| trainable: Boolean, whether the variable should be part of the layer's
| "trainable_variables" (e.g. variables, biases)
| or "non_trainable_variables" (e.g. BatchNorm mean and variance).
| Note that `trainable` cannot be `True` if `synchronization`
| is set to `ON_READ`.
| constraint: Constraint instance (callable).
| partitioner: Partitioner to be passed to the `Trackable` API.
| use_resource: Whether to use `ResourceVariable`.
| synchronization: Indicates when a distributed variable will be
| aggregated. Accepted values are constants defined in the class
| `tf.VariableSynchronization`. By default the synchronization is set to
| `AUTO` and the current `DistributionStrategy` chooses
| when to synchronize. If `synchronization` is set to `ON_READ`,
| `trainable` must not be set to `True`.
| aggregation: Indicates how a distributed variable will be aggregated.
| Accepted values are constants defined in the class
| `tf.VariableAggregation`.
| **kwargs: Additional keyword arguments. Accepted values are `getter` and
| `collections`.
|
| Returns:
| The created variable. Usually either a `Variable` or `ResourceVariable`
| instance. If `partitioner` is not `None`, a `PartitionedVariable`
| instance is returned.
|
| Raises:
| RuntimeError: If called with partitioned variable regularization and
| eager execution is enabled.
| ValueError: When giving unsupported dtype and no initializer or when
| trainable has been set to True with synchronization set as `ON_READ`.
|
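As a sketch of how `add_weight` is typically used inside a custom layer, here is a simple linear layer written only to illustrate the call:

```python
import tensorflow as tf

class MyLinear(tf.keras.layers.Layer):
    def build(self, input_shape):
        # One trainable kernel created via add_weight, shaped lazily from the input.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(int(input_shape[-1]), 1),
                                      initializer='glorot_uniform',
                                      trainable=True)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)

layer = MyLinear()
print(layer(tf.ones((2, 3))).shape)  # (2, 1)
```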
| apply(self, inputs, *args, **kwargs)
| Deprecated, do NOT use! (deprecated)
|
| Warning: THIS FUNCTION IS DEPRECATED. It will be removed in a future version.
| Instructions for updating:
| Please use `layer.__call__` method instead.
|
| This is an alias of `self.__call__`.
|
| Arguments:
| inputs: Input tensor(s).
| *args: additional positional arguments to be passed to `self.call`.
| **kwargs: additional keyword arguments to be passed to `self.call`.
|
| Returns:
| Output tensor(s).
|
| compute_output_signature(self, input_signature)
| Compute the output tensor signature of the layer based on the inputs.
|
| Unlike a TensorShape object, a TensorSpec object contains both shape
| and dtype information for a tensor. This method allows layers to provide
| output dtype information if it is different from the input dtype.
| For any layer that doesn't implement this function,
| the framework will fall back to use `compute_output_shape`, and will
| assume that the output dtype matches the input dtype.
|
| Args:
| input_signature: Single TensorSpec or nested structure of TensorSpec
| objects, describing a candidate input for the layer.
|
| Returns:
| Single TensorSpec or nested structure of TensorSpec objects, describing
| how the layer would transform the provided input.
|
| Raises:
| TypeError: If input_signature contains a non-TensorSpec object.
|
| count_params(self)
| Count the total number of scalars composing the weights.
|
| Returns:
| An integer count.
|
| Raises:
| ValueError: if the layer isn't yet built
| (in which case its weights aren't yet defined).
|
| get_input_at(self, node_index)
| Retrieves the input tensor(s) of a layer at a given node.
|
| Arguments:
| node_index: Integer, index of the node
| from which to retrieve the attribute.
| E.g. `node_index=0` will correspond to the
| first time the layer was called.
|
| Returns:
| A tensor (or list of tensors if the layer has multiple inputs).
|
| Raises:
| RuntimeError: If called in Eager mode.
|
| get_input_mask_at(self, node_index)
| Retrieves the input mask tensor(s) of a layer at a given node.
|
| Arguments:
| node_index: Integer, index of the node
| from which to retrieve the attribute.
| E.g. `node_index=0` will correspond to the
| first time the layer was called.
|
| Returns:
| A mask tensor
| (or list of tensors if the layer has multiple inputs).
|
| get_input_shape_at(self, node_index)
| Retrieves the input shape(s) of a layer at a given node.
|
| Arguments:
| node_index: Integer, index of the node
| from which to retrieve the attribute.
| E.g. `node_index=0` will correspond to the
| first time the layer was called.
|
| Returns:
| A shape tuple
| (or list of shape tuples if the layer has multiple inputs).
|
| Raises:
| RuntimeError: If called in Eager mode.
|
| get_losses_for(self, inputs)
| Retrieves losses relevant to a specific set of inputs.
|
| Arguments:
| inputs: Input tensor or list/tuple of input tensors.
|
| Returns:
| List of loss tensors of the layer that depend on `inputs`.
|
| get_output_at(self, node_index)
| Retrieves the output tensor(s) of a layer at a given node.
|
| Arguments:
| node_index: Integer, index of the node
| from which to retrieve the attribute.
| E.g. `node_index=0` will correspond to the
| first time the layer was called.
|
| Returns:
| A tensor (or list of tensors if the layer has multiple outputs).
|
| Raises:
| RuntimeError: If called in Eager mode.
|
| get_output_mask_at(self, node_index)
| Retrieves the output mask tensor(s) of a layer at a given node.
|
| Arguments:
| node_index: Integer, index of the node
| from which to retrieve the attribute.
| E.g. `node_index=0` will correspond to the
| first time the layer was called.
|
| Returns:
| A mask tensor
| (or list of tensors if the layer has multiple outputs).
|
| get_output_shape_at(self, node_index)
| Retrieves the output shape(s) of a layer at a given node.
|
| Arguments:
| node_index: Integer, index of the node
| from which to retrieve the attribute.
| E.g. `node_index=0` will correspond to the
| first time the layer was called.
|
| Returns:
| A shape tuple
| (or list of shape tuples if the layer has multiple outputs).
|
| Raises:
| RuntimeError: If called in Eager mode.
|
| get_updates_for(self, inputs)
| Retrieves updates relevant to a specific set of inputs.
|
| Arguments:
| inputs: Input tensor or list/tuple of input tensors.
|
| Returns:
| List of update ops of the layer that depend on `inputs`.
|
| set_weights(self, weights)
| Sets the weights of the layer, from Numpy arrays.
|
| Arguments:
| weights: a list of Numpy arrays. The number
| of arrays and their shape must match
| number of the dimensions of the weights
| of the layer (i.e. it should match the
| output of `get_weights`).
|
| Raises:
| ValueError: If the provided weights list does not match the
| layer's specifications.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from tensorflow.python.keras.engine.base_layer.Layer:
|
| activity_regularizer
| Optional regularizer function for the output of this layer.
|
| dtype
|
| inbound_nodes
| Deprecated, do NOT use! Only for compatibility with external Keras.
|
| input
| Retrieves the input tensor(s) of a layer.
|
| Only applicable if the layer has exactly one input,
| i.e. if it is connected to one incoming layer.
|
| Returns:
| Input tensor or list of input tensors.
|
| Raises:
| RuntimeError: If called in Eager mode.
| AttributeError: If no inbound nodes are found.
|
| input_mask
| Retrieves the input mask tensor(s) of a layer.
|
| Only applicable if the layer has exactly one inbound node,
| i.e. if it is connected to one incoming layer.
|
| Returns:
| Input mask tensor (potentially None) or list of input
| mask tensors.
|
| Raises:
| AttributeError: if the layer is connected to
| more than one incoming layers.
|
| input_shape
| Retrieves the input shape(s) of a layer.
|
| Only applicable if the layer has exactly one input,
| i.e. if it is connected to one incoming layer, or if all inputs
| have the same shape.
|
| Returns:
| Input shape, as an integer shape tuple
| (or list of shape tuples, one tuple per input tensor).
|
| Raises:
| AttributeError: if the layer has no defined input_shape.
| RuntimeError: if called in Eager mode.
|
| losses
| Losses which are associated with this `Layer`.
|
| Variable regularization tensors are created when this property is accessed,
| so it is eager safe: accessing `losses` under a `tf.GradientTape` will
| propagate gradients back to the corresponding variables.
|
| Returns:
| A list of tensors.
|
| name
| Returns the name of this module as passed or determined in the ctor.
|
| NOTE: This is not the same as the `self.name_scope.name` which includes
| parent module names.
|
| non_trainable_variables
|
| outbound_nodes
| Deprecated, do NOT use! Only for compatibility with external Keras.
|
| output
| Retrieves the output tensor(s) of a layer.
|
| Only applicable if the layer has exactly one output,
| i.e. if it is connected to one incoming layer.
|
| Returns:
| Output tensor or list of output tensors.
|
| Raises:
| AttributeError: if the layer is connected to more than one incoming
| layers.
| RuntimeError: if called in Eager mode.
|
| output_mask
| Retrieves the output mask tensor(s) of a layer.
|
| Only applicable if the layer has exactly one inbound node,
| i.e. if it is connected to one incoming layer.
|
| Returns:
| Output mask tensor (potentially None) or list of output
| mask tensors.
|
| Raises:
| AttributeError: if the layer is connected to
| more than one incoming layers.
|
| output_shape
| Retrieves the output shape(s) of a layer.
|
| Only applicable if the layer has one output,
| or if all outputs have the same shape.
|
| Returns:
| Output shape, as an integer shape tuple
| (or list of shape tuples, one tuple per output tensor).
|
| Raises:
| AttributeError: if the layer has no defined output shape.
| RuntimeError: if called in Eager mode.
|
| trainable
|
| trainable_variables
| Sequence of variables owned by this module and its submodules.
|
| Note: this method uses reflection to find variables on the current instance
| and submodules. For performance reasons you may wish to cache the result
| of calling this method if you don't expect the return value to change.
|
| Returns:
| A sequence of variables for the current module (sorted by attribute
| name) followed by variables from all submodules recursively (breadth
| first).
|
| updates
|
| variables
| Returns the list of all layer variables/weights.
|
| Alias of `self.weights`.
|
| Returns:
| A list of variables.
|
| ----------------------------------------------------------------------
| Class methods inherited from tensorflow.python.module.module.Module:
|
| with_name_scope(method) from builtins.type
| Decorator to automatically enter the module name scope.
|
| ```
| class MyModule(tf.Module):
| @tf.Module.with_name_scope
| def __call__(self, x):
| if not hasattr(self, 'w'):
| self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
| return tf.matmul(x, self.w)
| ```
|
| Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
| names included the module name:
|
| ```
| mod = MyModule()
| mod(tf.ones([8, 32]))
| # ==> <tf.Tensor: ...>
| mod.w
| # ==> <tf.Variable ...'my_module/w:0'>
| ```
|
| Args:
| method: The method to wrap.
|
| Returns:
| The original method wrapped such that it enters the module's name scope.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from tensorflow.python.module.module.Module:
|
| name_scope
| Returns a `tf.name_scope` instance for this class.
|
| submodules
| Sequence of all sub-modules.
|
| Submodules are modules which are properties of this module, or found as
| properties of modules which are properties of this module (and so on).
|
| ```
| a = tf.Module()
| b = tf.Module()
| c = tf.Module()
| a.b = b
| b.c = c
| assert list(a.submodules) == [b, c]
| assert list(b.submodules) == [c]
| assert list(c.submodules) == []
| ```
|
| Returns:
| A sequence of all submodules.
|
| ----------------------------------------------------------------------
| Data descriptors inherited from tensorflow.python.training.tracking.base.Trackable:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Creating a Model: There are two ways to create models through the TF 2 Keras API: either pass in a list of layers all at once, or add them one by one. Let's show both methods (it's up to you to choose which you prefer).
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
###Output
_____no_output_____
###Markdown
Model - as a list of layers
###Code
model = Sequential([
Dense(units=2),
Dense(units=2),
Dense(units=2)
])
###Output
_____no_output_____
###Markdown
Model - adding in layers one by one
###Code
model = Sequential()
model.add(Dense(2))
model.add(Dense(2))
model.add(Dense(2))
###Output
_____no_output_____
###Markdown
Let's go ahead and build a simple model and then compile it by specifying the optimizer and the loss function.
###Code
model = Sequential()
model.add(Dense(4,activation='relu'))
model.add(Dense(4,activation='relu'))
model.add(Dense(4,activation='relu'))
# Final output node for prediction
model.add(Dense(1))
model.compile(optimizer='rmsprop',loss='mse')
###Output
_____no_output_____
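One optional aside (not part of the original notebook): this Sequential model has no input_shape, so it is only built once data is passed to fit; calling model.summary() before that raises a ValueError, as noted in the help text above. Giving the first layer an input_shape (3 features is an assumption here) builds it immediately:

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

inspectable = Sequential()
inspectable.add(Dense(4, activation='relu', input_shape=(3,)))  # 3 input features assumed
inspectable.add(Dense(4, activation='relu'))
inspectable.add(Dense(4, activation='relu'))
inspectable.add(Dense(1))
inspectable.compile(optimizer='rmsprop', loss='mse')
inspectable.summary()  # prints layer names, output shapes and parameter counts
```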
###Markdown
Choosing an optimizer and loss: Keep in mind what kind of problem you are trying to solve. For a multi-class classification problem use model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']); for a binary classification problem use model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']); for a mean squared error regression problem use model.compile(optimizer='rmsprop', loss='mse'). Training: Below are some common definitions that are necessary to know and understand to use Keras correctly. * Sample: one element of a dataset (for example, one image is a sample in a convolutional network, and one audio file is a sample for a speech recognition model). * Batch: a set of N samples. The samples in a batch are processed independently, in parallel; if training, a batch results in only one update to the model. A batch generally approximates the distribution of the input data better than a single input: the larger the batch, the better the approximation, but the batch will also take longer to process and will still result in only one update. For inference (evaluate/predict), it is recommended to pick a batch size that is as large as you can afford without going out of memory, since larger batches usually result in faster evaluation/prediction. * Epoch: an arbitrary cutoff, generally defined as "one pass over the entire dataset", used to separate training into distinct phases, which is useful for logging and periodic evaluation. * When using validation_data or validation_split with the fit method of Keras models, evaluation will be run at the end of every epoch. * Within Keras, there is the ability to add callbacks specifically designed to be run at the end of an epoch; examples are learning-rate changes and model checkpointing (saving). The compile variants and callbacks are sketched in the code below.
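A minimal sketch of those three compile() configurations and of end-of-epoch callbacks; the layer sizes, patience values and checkpoint file name are illustrative assumptions, not values used later in this notebook:

```python
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# Multi-class classification: softmax output with one unit per class.
multi = Sequential([Dense(8, activation='relu'), Dense(3, activation='softmax')])
multi.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# Binary classification: single sigmoid output.
binary = Sequential([Dense(8, activation='relu'), Dense(1, activation='sigmoid')])
binary.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# Mean squared error regression: single linear output.
reg = Sequential([Dense(8, activation='relu'), Dense(1)])
reg.compile(optimizer='rmsprop', loss='mse')

# Callbacks run at the end of each epoch: early stopping, checkpointing, learning-rate changes.
callbacks = [
    EarlyStopping(monitor='val_loss', patience=10),
    ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5),
]
# reg.fit(X_train, y_train, validation_split=0.2, epochs=250, callbacks=callbacks)
```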
###Code
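# X_train and y_train are assumed to have been prepared earlier in the notebook
# (e.g. via a train/test split); epochs=250 means 250 full passes over the training data.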
model.fit(X_train,y_train,epochs=250)
###Output
Train on 700 samples
Epoch 1/250
700/700 [==============================] - 1s 1ms/sample - loss: 256678.6899
Epoch 2/250
700/700 [==============================] - 0s 67us/sample - loss: 256557.3328
Epoch 3/250
700/700 [==============================] - 0s 67us/sample - loss: 256435.2685
Epoch 4/250
700/700 [==============================] - 0s 69us/sample - loss: 256297.5242
Epoch 5/250
700/700 [==============================] - 0s 67us/sample - loss: 256139.6521
Epoch 6/250
700/700 [==============================] - 0s 89us/sample - loss: 255959.0959
Epoch 7/250
700/700 [==============================] - 0s 56us/sample - loss: 255751.4558
Epoch 8/250
700/700 [==============================] - 0s 89us/sample - loss: 255515.1171
Epoch 9/250
700/700 [==============================] - 0s 67us/sample - loss: 255240.5993
Epoch 10/250
700/700 [==============================] - 0s 89us/sample - loss: 254925.4916
Epoch 11/250
700/700 [==============================] - 0s 69us/sample - loss: 254567.7298
Epoch 12/250
700/700 [==============================] - 0s 67us/sample - loss: 254163.5860
Epoch 13/250
700/700 [==============================] - 0s 67us/sample - loss: 253711.2249
Epoch 14/250
700/700 [==============================] - 0s 57us/sample - loss: 253207.9388
Epoch 15/250
700/700 [==============================] - 0s 89us/sample - loss: 252649.8949
Epoch 16/250
700/700 [==============================] - 0s 67us/sample - loss: 252035.8005
Epoch 17/250
700/700 [==============================] - 0s 89us/sample - loss: 251361.9668
Epoch 18/250
700/700 [==============================] - 0s 69us/sample - loss: 250630.4323
Epoch 19/250
700/700 [==============================] - 0s 89us/sample - loss: 249834.5367
Epoch 20/250
700/700 [==============================] - 0s 67us/sample - loss: 248964.4419
Epoch 21/250
700/700 [==============================] - 0s 89us/sample - loss: 248029.2328
Epoch 22/250
700/700 [==============================] - 0s 67us/sample - loss: 247016.8577
Epoch 23/250
700/700 [==============================] - 0s 89us/sample - loss: 245919.6555
Epoch 24/250
700/700 [==============================] - 0s 67us/sample - loss: 244745.7887
Epoch 25/250
700/700 [==============================] - 0s 89us/sample - loss: 243485.6529
Epoch 26/250
700/700 [==============================] - 0s 67us/sample - loss: 242129.3484
Epoch 27/250
700/700 [==============================] - 0s 89us/sample - loss: 240689.1388
Epoch 28/250
700/700 [==============================] - 0s 67us/sample - loss: 239153.4667
Epoch 29/250
700/700 [==============================] - 0s 89us/sample - loss: 237520.4308
Epoch 30/250
700/700 [==============================] - 0s 67us/sample - loss: 235783.5987
Epoch 31/250
700/700 [==============================] - 0s 89us/sample - loss: 233942.2699
Epoch 32/250
700/700 [==============================] - 0s 69us/sample - loss: 231982.6838
Epoch 33/250
700/700 [==============================] - 0s 67us/sample - loss: 229905.5206
Epoch 34/250
700/700 [==============================] - 0s 89us/sample - loss: 227726.2409
Epoch 35/250
700/700 [==============================] - 0s 67us/sample - loss: 225433.7657
Epoch 36/250
700/700 [==============================] - 0s 91us/sample - loss: 223007.5024
Epoch 37/250
700/700 [==============================] - 0s 67us/sample - loss: 220470.3121
Epoch 38/250
700/700 [==============================] - 0s 67us/sample - loss: 217800.4992
Epoch 39/250
700/700 [==============================] - 0s 89us/sample - loss: 215000.5040
Epoch 40/250
700/700 [==============================] - 0s 67us/sample - loss: 212070.4630
Epoch 41/250
700/700 [==============================] - 0s 89us/sample - loss: 209021.6112
Epoch 42/250
700/700 [==============================] - 0s 67us/sample - loss: 205820.6153
Epoch 43/250
700/700 [==============================] - 0s 67us/sample - loss: 202485.9254
Epoch 44/250
700/700 [==============================] - 0s 89us/sample - loss: 199032.7301
Epoch 45/250
700/700 [==============================] - 0s 67us/sample - loss: 195436.0692
Epoch 46/250
700/700 [==============================] - 0s 89us/sample - loss: 191699.3609
Epoch 47/250
700/700 [==============================] - 0s 67us/sample - loss: 187801.8943
Epoch 48/250
700/700 [==============================] - 0s 67us/sample - loss: 183781.5669
Epoch 49/250
700/700 [==============================] - 0s 89us/sample - loss: 179660.2206
Epoch 50/250
700/700 [==============================] - 0s 67us/sample - loss: 175374.3602
Epoch 51/250
700/700 [==============================] - 0s 89us/sample - loss: 170959.2488
Epoch 52/250
700/700 [==============================] - 0s 67us/sample - loss: 166390.8793
Epoch 53/250
700/700 [==============================] - 0s 89us/sample - loss: 161693.8322
Epoch 54/250
700/700 [==============================] - 0s 67us/sample - loss: 156896.2863
Epoch 55/250
700/700 [==============================] - 0s 67us/sample - loss: 151958.7138
Epoch 56/250
700/700 [==============================] - 0s 67us/sample - loss: 146943.3821
Epoch 57/250
700/700 [==============================] - 0s 67us/sample - loss: 141799.3351
Epoch 58/250
700/700 [==============================] - 0s 67us/sample - loss: 136534.7192
Epoch 59/250
700/700 [==============================] - 0s 67us/sample - loss: 131191.1925
Epoch 60/250
700/700 [==============================] - 0s 67us/sample - loss: 125746.5604
Epoch 61/250
700/700 [==============================] - 0s 89us/sample - loss: 120214.6602
Epoch 62/250
700/700 [==============================] - 0s 67us/sample - loss: 114611.5430
Epoch 63/250
700/700 [==============================] - 0s 67us/sample - loss: 108961.6057
Epoch 64/250
700/700 [==============================] - 0s 89us/sample - loss: 103238.3104
Epoch 65/250
700/700 [==============================] - 0s 67us/sample - loss: 97488.6292
Epoch 66/250
700/700 [==============================] - 0s 67us/sample - loss: 91736.5993
Epoch 67/250
700/700 [==============================] - 0s 78us/sample - loss: 85975.4235
Epoch 68/250
700/700 [==============================] - 0s 67us/sample - loss: 80189.9361
Epoch 69/250
700/700 [==============================] - 0s 89us/sample - loss: 74465.9286
Epoch 70/250
700/700 [==============================] - 0s 67us/sample - loss: 68733.6601
Epoch 71/250
700/700 [==============================] - 0s 69us/sample - loss: 63123.0146
Epoch 72/250
700/700 [==============================] - 0s 89us/sample - loss: 57568.7673
Epoch 73/250
700/700 [==============================] - 0s 67us/sample - loss: 52143.8000
Epoch 74/250
700/700 [==============================] - 0s 67us/sample - loss: 46841.6530
Epoch 75/250
700/700 [==============================] - 0s 67us/sample - loss: 41664.3811
Epoch 76/250
700/700 [==============================] - 0s 89us/sample - loss: 36710.3025
Epoch 77/250
700/700 [==============================] - 0s 67us/sample - loss: 31980.2638
Epoch 78/250
700/700 [==============================] - 0s 67us/sample - loss: 27490.0044
Epoch 79/250
700/700 [==============================] - 0s 67us/sample - loss: 23295.2193
Epoch 80/250
700/700 [==============================] - 0s 89us/sample - loss: 19399.2424
Epoch 81/250
700/700 [==============================] - 0s 67us/sample - loss: 15821.4121
Epoch 82/250
700/700 [==============================] - 0s 67us/sample - loss: 12634.9319
Epoch 83/250
700/700 [==============================] - 0s 67us/sample - loss: 9866.9726
Epoch 84/250
700/700 [==============================] - 0s 67us/sample - loss: 7541.5573
Epoch 85/250
700/700 [==============================] - 0s 67us/sample - loss: 5719.8526
Epoch 86/250
700/700 [==============================] - 0s 67us/sample - loss: 4370.8675
Epoch 87/250
700/700 [==============================] - 0s 67us/sample - loss: 3482.8717
Epoch 88/250
700/700 [==============================] - 0s 67us/sample - loss: 3081.3459
Epoch 89/250
700/700 [==============================] - 0s 89us/sample - loss: 2955.0584
Epoch 90/250
700/700 [==============================] - 0s 67us/sample - loss: 2919.0084
Epoch 91/250
700/700 [==============================] - 0s 67us/sample - loss: 2878.1071
Epoch 92/250
700/700 [==============================] - 0s 85us/sample - loss: 2835.3627
Epoch 93/250
700/700 [==============================] - 0s 67us/sample - loss: 2793.9308
Epoch 94/250
700/700 [==============================] - 0s 89us/sample - loss: 2754.3078
Epoch 95/250
700/700 [==============================] - 0s 69us/sample - loss: 2718.6959
Epoch 96/250
700/700 [==============================] - 0s 67us/sample - loss: 2676.1233
Epoch 97/250
700/700 [==============================] - 0s 67us/sample - loss: 2641.5044
Epoch 98/250
700/700 [==============================] - 0s 67us/sample - loss: 2600.8627
Epoch 99/250
700/700 [==============================] - 0s 67us/sample - loss: 2567.6836
Epoch 100/250
700/700 [==============================] - 0s 67us/sample - loss: 2527.4432
Epoch 101/250
700/700 [==============================] - 0s 67us/sample - loss: 2494.8283
Epoch 102/250
700/700 [==============================] - 0s 89us/sample - loss: 2459.9839
Epoch 103/250
700/700 [==============================] - 0s 80us/sample - loss: 2422.0237
Epoch 104/250
700/700 [==============================] - 0s 67us/sample - loss: 2385.5557
Epoch 105/250
700/700 [==============================] - 0s 67us/sample - loss: 2352.6271
Epoch 106/250
700/700 [==============================] - 0s 67us/sample - loss: 2315.9826
Epoch 107/250
700/700 [==============================] - 0s 69us/sample - loss: 2275.5747
Epoch 108/250
700/700 [==============================] - 0s 67us/sample - loss: 2240.5681
Epoch 109/250
700/700 [==============================] - 0s 67us/sample - loss: 2202.7267
Epoch 110/250
700/700 [==============================] - 0s 78us/sample - loss: 2164.8818
Epoch 111/250
700/700 [==============================] - 0s 67us/sample - loss: 2128.8680
Epoch 112/250
700/700 [==============================] - 0s 67us/sample - loss: 2093.5601
Epoch 113/250
700/700 [==============================] - 0s 89us/sample - loss: 2059.8525
Epoch 114/250
700/700 [==============================] - 0s 67us/sample - loss: 2027.5212
Epoch 115/250
700/700 [==============================] - 0s 69us/sample - loss: 1993.6040
Epoch 116/250
700/700 [==============================] - 0s 67us/sample - loss: 1956.8016
Epoch 117/250
700/700 [==============================] - 0s 89us/sample - loss: 1925.7439
Epoch 118/250
700/700 [==============================] - 0s 67us/sample - loss: 1893.9992
Epoch 119/250
700/700 [==============================] - 0s 67us/sample - loss: 1859.5495
Epoch 120/250
700/700 [==============================] - 0s 67us/sample - loss: 1829.7004
Epoch 121/250
700/700 [==============================] - 0s 67us/sample - loss: 1794.5159
Epoch 122/250
700/700 [==============================] - 0s 89us/sample - loss: 1762.4011
Epoch 123/250
700/700 [==============================] - 0s 67us/sample - loss: 1731.3614
Epoch 124/250
700/700 [==============================] - 0s 67us/sample - loss: 1694.8818
Epoch 125/250
700/700 [==============================] - 0s 67us/sample - loss: 1660.6659
Epoch 126/250
700/700 [==============================] - 0s 69us/sample - loss: 1628.8121
Epoch 127/250
700/700 [==============================] - 0s 67us/sample - loss: 1596.7363
Epoch 128/250
700/700 [==============================] - 0s 89us/sample - loss: 1561.3069
Epoch 129/250
700/700 [==============================] - 0s 67us/sample - loss: 1525.3697
Epoch 130/250
700/700 [==============================] - 0s 67us/sample - loss: 1501.4490
Epoch 131/250
700/700 [==============================] - 0s 67us/sample - loss: 1471.8032
Epoch 132/250
700/700 [==============================] - 0s 69us/sample - loss: 1441.8526
Epoch 133/250
700/700 [==============================] - 0s 67us/sample - loss: 1411.3840
Epoch 134/250
700/700 [==============================] - 0s 67us/sample - loss: 1375.3392
Epoch 135/250
700/700 [==============================] - 0s 67us/sample - loss: 1344.4005
Epoch 136/250
700/700 [==============================] - 0s 67us/sample - loss: 1316.0051
Epoch 137/250
700/700 [==============================] - 0s 67us/sample - loss: 1286.1575
Epoch 138/250
700/700 [==============================] - 0s 67us/sample - loss: 1258.5466
Epoch 139/250
700/700 [==============================] - 0s 89us/sample - loss: 1231.0350
Epoch 140/250
700/700 [==============================] - 0s 67us/sample - loss: 1202.8353
Epoch 141/250
700/700 [==============================] - 0s 67us/sample - loss: 1171.3123
Epoch 142/250
700/700 [==============================] - 0s 67us/sample - loss: 1145.8823
Epoch 143/250
700/700 [==============================] - 0s 67us/sample - loss: 1117.1228
Epoch 144/250
700/700 [==============================] - 0s 67us/sample - loss: 1091.9406
Epoch 145/250
700/700 [==============================] - 0s 67us/sample - loss: 1066.3266
Epoch 146/250
700/700 [==============================] - 0s 67us/sample - loss: 1034.5236
Epoch 147/250
700/700 [==============================] - 0s 67us/sample - loss: 1009.6341
Epoch 148/250
700/700 [==============================] - 0s 89us/sample - loss: 982.0937
Epoch 149/250
700/700 [==============================] - 0s 67us/sample - loss: 954.0501
Epoch 150/250
700/700 [==============================] - 0s 67us/sample - loss: 926.7213
Epoch 151/250
700/700 [==============================] - 0s 67us/sample - loss: 903.3459
Epoch 152/250
700/700 [==============================] - 0s 67us/sample - loss: 873.8258
Epoch 153/250
700/700 [==============================] - 0s 89us/sample - loss: 846.7390
Epoch 154/250
700/700 [==============================] - 0s 67us/sample - loss: 822.1480
Epoch 155/250
700/700 [==============================] - 0s 67us/sample - loss: 795.3657
Epoch 156/250
700/700 [==============================] - 0s 88us/sample - loss: 770.9504
Epoch 157/250
700/700 [==============================] - 0s 89us/sample - loss: 744.3620
Epoch 158/250
700/700 [==============================] - 0s 67us/sample - loss: 719.1004
Epoch 159/250
700/700 [==============================] - 0s 113us/sample - loss: 696.3267
Epoch 160/250
700/700 [==============================] - 0s 89us/sample - loss: 671.8435
Epoch 161/250
700/700 [==============================] - 0s 100us/sample - loss: 649.7230
Epoch 162/250
700/700 [==============================] - 0s 97us/sample - loss: 627.0320
Epoch 163/250
700/700 [==============================] - 0s 89us/sample - loss: 605.2505
Epoch 164/250
700/700 [==============================] - 0s 89us/sample - loss: 582.2282
Epoch 165/250
700/700 [==============================] - 0s 134us/sample - loss: 561.1635
Epoch 166/250
700/700 [==============================] - 0s 89us/sample - loss: 541.3536
Epoch 167/250
700/700 [==============================] - 0s 89us/sample - loss: 522.3132
Epoch 168/250
700/700 [==============================] - 0s 69us/sample - loss: 503.2385
Epoch 169/250
700/700 [==============================] - 0s 89us/sample - loss: 481.9888
Epoch 170/250
700/700 [==============================] - 0s 89us/sample - loss: 461.5032
Epoch 171/250
700/700 [==============================] - 0s 89us/sample - loss: 442.1222
Epoch 172/250
700/700 [==============================] - 0s 67us/sample - loss: 423.0606
Epoch 173/250
700/700 [==============================] - 0s 89us/sample - loss: 403.8695
Epoch 174/250
700/700 [==============================] - 0s 84us/sample - loss: 386.0664
Epoch 175/250
700/700 [==============================] - 0s 70us/sample - loss: 370.9212
Epoch 176/250
700/700 [==============================] - 0s 89us/sample - loss: 352.6306
Epoch 177/250
700/700 [==============================] - 0s 67us/sample - loss: 333.7979
Epoch 178/250
700/700 [==============================] - 0s 67us/sample - loss: 316.0235
Epoch 179/250
700/700 [==============================] - 0s 67us/sample - loss: 296.4844
Epoch 180/250
700/700 [==============================] - 0s 69us/sample - loss: 280.1557
Epoch 181/250
700/700 [==============================] - 0s 67us/sample - loss: 263.3886
Epoch 182/250
###Markdown
Evaluation: Let's evaluate our performance on our training set and our test set. We can compare these two performances to check for overfitting.
###Code
model.history.history
loss = model.history.history['loss']
sns.lineplot(x=range(len(loss)),y=loss)
plt.title("Training Loss per Epoch");
###Output
_____no_output_____
###Markdown
Compare final evaluation (MSE) on training set and test set. These should hopefully be fairly close to each other.
###Code
model.metrics_names
training_score = model.evaluate(X_train,y_train,verbose=0)
test_score = model.evaluate(X_test,y_test,verbose=0)
training_score
test_score
###Output
_____no_output_____
###Markdown
Further Evaluations
###Code
test_predictions = model.predict(X_test)
test_predictions
pred_df = pd.DataFrame(y_test,columns=['Test Y'])
pred_df
test_predictions = pd.Series(test_predictions.reshape(300,))
test_predictions
pred_df = pd.concat([pred_df,test_predictions],axis=1)
pred_df.columns = ['Test Y','Model Predictions']
pred_df
###Output
_____no_output_____
###Markdown
Let's compare to the real test labels!
###Code
sns.scatterplot(x='Test Y',y='Model Predictions',data=pred_df)
pred_df['Error'] = pred_df['Test Y'] - pred_df['Model Predictions']
sns.distplot(pred_df['Error'],bins=50)
from sklearn.metrics import mean_absolute_error,mean_squared_error
mean_absolute_error(pred_df['Test Y'],pred_df['Model Predictions'])
mean_squared_error(pred_df['Test Y'],pred_df['Model Predictions'])
# Essentially the same thing, difference just due to precision
test_score
#RMSE
test_score**0.5
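# Illustrative check (not part of the original notebook): the RMSE above can
# equivalently be computed from the sklearn MSE of the predictions.
import numpy as np
np.sqrt(mean_squared_error(pred_df['Test Y'], pred_df['Model Predictions']))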
###Output
_____no_output_____
###Markdown
Predicting on brand new data: What if we just saw a brand new gemstone from the ground? What should we price it at? This is the **exact** same procedure as predicting on new test data!
###Code
# [[Feature1, Feature2]]
new_gem = [[998,1000]]
# Don't forget to scale!
scaler.transform(new_gem)
new_gem = scaler.transform(new_gem)
model.predict(new_gem)
###Output
_____no_output_____
###Markdown
Saving and Loading a Model
###Code
from tensorflow.keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
later_model = load_model('my_model.h5')
later_model.predict(new_gem)
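# Illustrative sanity check (not part of the original notebook): the reloaded
# model should reproduce the original model's prediction for the same input.
import numpy as np
np.allclose(model.predict(new_gem), later_model.predict(new_gem))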
###Output
_____no_output_____ |
analyses/seasonality_paper_st/all/train_val_set.ipynb | ###Markdown
Setup
###Code
from specific import *
figure_saver = figure_saver(sub_directory="train_val_set")
map_figure_saver = map_figure_saver(sub_directory="train_val_set")
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = get_offset_data()
X_train, X_val, y_train, y_val = data_split_cache.load()
rf = get_model()
###Output
_____no_output_____
###Markdown
BA in the train and validation sets. Valid elements are situated where master_mask is False.
###Code
masked_train_data = get_mm_data(y_train.values, master_mask, "train")
masked_val_data = get_mm_data(y_val.values, master_mask, "val")
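# Illustrative aside (not part of the original notebook): in numpy masked arrays
# the convention is mask=False for VALID elements, as stated above.
import numpy as np
demo = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
print(demo.mean())  # averages only the unmasked (valid) entries -> 2.0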
for fname, title, kind in zip(
("train_set_mask", "val_set_mask"),
("Train Set Mask", "Validation Set Mask"),
("train", "val"),
):
mask = get_mm_data(1, master_mask, kind)
mask.mask = master_mask.copy()
with map_figure_saver(fname):
cube_plotting(
mask,
title=title,
nbins=12,
cmap="viridis",
colorbar_kwargs={"label": "Fraction Present"},
)
with map_figure_saver("train_val_set_overall_ba_comp"):
fig, axes = plt.subplots(
3,
1,
constrained_layout=True,
figsize=(5.1, 8.4),
subplot_kw={"projection": ccrs.Robinson()},
)
shared_kwargs = {
"boundaries": [0, 4e-6, 1e-5, 1e-4, 1e-3, 1e-2, 8e-2],
"extend": "max",
"cmap": "inferno",
"colorbar_kwargs": {"format": "%0.1e", "label": "Fractional BA"},
"coastline_kwargs": {"linewidth": 0.3},
"title": "",
}
axes[0].set_title("Mean Overall GFED4 BA")
cube_plotting(
get_masked_array(endog_data.values, master_mask),
ax=axes[0],
fig=fig,
**shared_kwargs
)
axes[1].set_title("Mean Train Set GFED4 BA")
cube_plotting(masked_train_data, ax=axes[1], fig=fig, **shared_kwargs)
axes[2].set_title("Mean Validation Set GFED4 BA")
cube_plotting(masked_val_data, ax=axes[2], fig=fig, **shared_kwargs)
with map_figure_saver("train_val_set_difference"):
cube_plotting(
np.mean(masked_train_data, axis=0) - np.mean(masked_val_data, axis=0),
cmap="RdBu_r",
nbins=9,
log=True,
min_edge=1e-2,
cmap_midpoint=0,
cmap_symmetric=True,
colorbar_kwargs={"format": "%0.1e", "label": "Fractional BA"},
coastline_kwargs={"linewidth": 0.3},
title="<Train> - <Validation>",
fig=plt.figure(figsize=(5.1, 2.8)),
)
with map_figure_saver("train_val_set_rel_difference"):
cube_plotting(
(np.mean(masked_train_data, axis=0) - np.mean(masked_val_data, axis=0))
/ np.mean(get_masked_array(endog_data.values, master_mask), axis=0),
cmap="RdBu_r",
nbins=9,
log=True,
min_edge=1e-1,
cmap_midpoint=0,
cmap_symmetric=True,
colorbar_kwargs={"format": "%0.1e", "label": "Fractional BA"},
coastline_kwargs={"linewidth": 0.3},
title="(<Train> - <Validation>) / <GFED4>",
fig=plt.figure(figsize=(5.1, 2.8)),
)
print("<Train> - <Validation>:", np.mean(masked_train_data) - np.mean(masked_val_data))
###Output
_____no_output_____ |
code/astro/astro_notebook.ipynb | ###Markdown
Notebook for Solar Wind Exploration. In the initial phase, we want to see if we can detect FTEs using unsupervised learning, by finding a manifold for the solar wind data. The initial hypothesis is that the transition matrices (Markov matrices $M$) that can be derived from Manifolder + clustering will show distinctive clusters and transitions. We can check accuracy by looking at the label (FTE or not?) and see if this label could have been deduced from the data itself.
###Code
# useful set of python includes
%load_ext autoreload
%autoreload 2
import numpy as np
np.set_printoptions(suppress=True, precision=4)
import matplotlib.pyplot as plt
%config InlineBackend.figure_format = 'svg'
import seaborn as sns
sns.set()
import pandas as pd
import time
import random
###Output
_____no_output_____
###Markdown
Load Solar Wind Data, and Run Manifolder. The `dataset_2` file contains Dataset-2 (THEMIS): a list of FTE periods and non-FTE periods observed by THEMIS in 2007. These are combined into one file in random order (FTE, non-FTE, FTE, FTE, non-FTE, etc.). In total there are 63 FTEs and 47 non-FTEs. The time series are separated by one blank line, and each one has 1440 points over a period of 6 minutes.
###Code
import sys
sys.path.append(r"C:\Users\acloninger\GDrive\ac2528Backup\DocsFolder\GitHub\manifolder")
sys.path.append(r"..")
import manifolder as mr
from manifolder import helper as mh
# load the data
# note, you must have started the notebook in the directory containing astro_data/, so the relative path below resolves
print('loading data ...')
df = pd.read_excel('astro_data/dataset_2.xlsx', index_col=0)
df.head()
# convert values from the loaded spreadsheet into numpy matrices
# note that there is no need for the first value, which is time,
# as it is not part of the manifold
#
# also, note the spreadsheet is missing a column name for `Unnamed: 13`, and the values above
# this have the incorrect column labels; the first relevant value is bx, which has a magnitude around 2
#
# note the final value of each row is the goal (0 or 1), and not part of z
data_raw = df.values[:, 1:]
print('first line of raw_data:\n', data_raw[0, :])
# loop through the data, breaking out the clusters
# i will always point to the NaN (blank line) in the dataframe,
# and values [i-1440:i] is the snippet
snippet_len = 1440
# collect all line breaks (blank lines) in csv file
#lineBreaks = [0]
#for i in range(data_raw.shape[0]):
# if data_raw[i,0] != data_raw[i,0]: # replacement of isnan, since nan != nan
# lineBreaks.append(i)
#lineBreaks.append(data_raw.shape[0])
#
#num_snippet = len(lineBreaks)-1
# collect the snippets into two groups, one for each goal (target) value, 0 or 1
# these can be easily merged
zs_0 = []
zs_1 = []
df.values[0,:]
for i in range(snippet_len,data_raw.shape[0],snippet_len+1):
# copy the snippet, excluding the last value, which is the goal
snippet = data_raw[i-snippet_len:i,:-1]
# grab the goal value from the first row of each snippet
goal = data_raw[i-snippet_len,-1]
# check to make sure each snippet does not contain NaN
# (should not, if parsing is correct)
assert ~np.isnan(snippet).any(), 'oops, snippet contains a Nan!'
print('snippet size',snippet.shape,'with goal',goal)
if goal == 0:
zs_0.append( snippet )
elif goal == 1:
zs_1.append( snippet )
else:
assert False, 'value of goal not understood'
# shuffle these lists; this should not strictly be necessary if all the data is being used,
# but prevents biases when shortening the list
random.shuffle(zs_0)
random.shuffle(zs_1)
shorten_data = False
if shorten_data:
zs_0 = zs_0[:10]
zs_1 = zs_1[:10]
zs = zs_0 + zs_1
z_breakpoint = len(zs_0)
print('\ndone!')
print( '\t len(zs_0):',len(zs_0))
print( '\t len(zs_1):',len(zs_1))
print( '\t len(zs):',len(zs))
import matplotlib.pyplot as plt
plt.figure()
for i in range(9):
plt.subplot(3,3,i+1)
plt.plot(zs_0[i])
plt.show()
plt.figure()
for i in range(9):
plt.subplot(3,3,i+1)
plt.plot(zs_1[i])
plt.show()
# data has been parsed, now run Manifolder
H = 80
step_size = 10
nbins = 10
ncov = 20
start_time = time.time()
# create manifolder object
manifolder = mr.Manifolder(H=H,step_size=step_size,nbins=nbins, ncov=ncov)
# add the data, and fit (this runs all the functions)
manifolder.fit_transform(zs, parallel=True, use_dtw=False)
elapsed_time = time.time() - start_time
print('\n\t Program Executed in', str(np.round(elapsed_time, 2)), 'seconds') # about 215 seconds (four minutes)
start_time = time.time()
manifolder._clustering(numClusters=7, kmns=False, distance_measure=None) # display
print(manifolder.IDX.shape)
elapsed_time = time.time() - start_time
print('\n\t Program Executed for k means clustering in', str(np.round(elapsed_time, 2)), 'seconds')
# clustering data for k-means...
IDX = manifolder.IDX
cluster_lens = mh.count_cluster_lengths(IDX)
# cluster_lens is a dictionary, where each key is the cluster number (0:6),
# and the values are a list of cluster lengths
mh.show_cluster_lens(cluster_lens)
# in this case, index goes from 0 to 6 ...
# can also have outlier groups in kmeans, need to check for this
print(IDX.shape)
print(np.min(IDX))
print(np.max(IDX))
IDX_max = np.max(IDX)
M = mh.make_transition_matrix(IDX)
print('\n transition matrix:')
print(M)
M_z0 = mh.make_transition_matrix(IDX[manifolder.snip_number<z_breakpoint])
M_z1 = mh.make_transition_matrix(IDX[manifolder.snip_number>=z_breakpoint])
print('\n z0 transition matrix:')
print(M_z0)
print('\n z1 transition matrix:')
print(M_z1)
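# Illustrative aside (not part of the original notebook): normalising each count
# matrix with the helper used further below makes the two classes easier to compare
# directly (this assumes mh.make_matrix_markov accepts a raw count matrix).
print(mh.make_matrix_markov(M_z0))
print(mh.make_matrix_markov(M_z1))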
z_downsample = np.empty((0,zs[0].shape[1]+1), float)
for i in range(len(zs)):
x = zs[i]
x = x[0:x.shape[0]-H,:]
x = x[::step_size]
if i<z_breakpoint:
x = np.append(x,np.zeros((x.shape[0],1)),1)
else:
x = np.append(x,np.ones((x.shape[0],1)),1)
z_downsample = np.append(z_downsample,x,0)
z_downsample = np.append(z_downsample, manifolder.snip_number.reshape(len(IDX),1), 1)
z_downsample = np.append(z_downsample, IDX.reshape(len(IDX),1), 1)
z_downsample.shape
np.savetxt('astro_subset2_clustering_k=10.csv', z_downsample, delimiter=',', fmt='%f')
###Output
_____no_output_____
###Markdown
TODO: After running `fit_transform()`, use kmeans to label clusters within all the snippets. Create a transition matrix for each snippet; the zs_0 and zs_1 should have distinctively different matrices, which can be used to categorize the snippet.
###Code
### _cluster() function, local to make it easier to work on
### ... note, all the individual clusters should be marked individually?
### but the original kmeans runs all of them together?
###
# Configuration
numClusters = 7 # NOTE, this was previously 14 (too many!)
intrinsicDim = manifolder.Dim # can be varied slightly but shouldn't be much larger than Dim
## Clusters
# IDX = kmeans(Psi(:, 1:intrinsicDim), numClusters)
# Python kmeans see
# https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.cluster.vq.kmeans.html
# scipy.cluster.vq.kmeans(obs, k_or_guess, iter=20, thresh=1e-05)
#
# note, python expects each ROW to be an observation, looks the same as matlab
#
print('running k-means')
from sklearn.cluster import KMeans  # needed for the local k-means run below; not imported earlier in this notebook
kmeans = KMeans(n_clusters=numClusters).fit(manifolder.Psi[:, :intrinsicDim])
IDX = kmeans.labels_
# TODO decide how to plot multiple snips
# think that x_ref[1,:] is just
for snip in range(len(zs)):  # use the notebook-level list of snippets (zs) rather than self.z from the class method this was copied from
if snip == 0:
x = zs[snip][0, :]
xref1 = x[::step_size] # downsample, to match the data steps
else:
x = zs[snip][0, :]
x = x[::step_size]
xref1 = np.append(xref1, x)
print(xref1.shape)
xs = manifolder.Psi[:, 0]
ys = manifolder.Psi[:, 1]
zs = manifolder.Psi[:, 2]
# normalize these to amplitude one?
print('normalizing amplitudes of Psi in Python ...')
xs /= np.max(np.abs(xs))
ys /= np.max(np.abs(ys))
zs /= np.max(np.abs(zs))
# xs -= np.mean(xs)
# ys -= np.mean(ys)
# zs -= np.mean(zs)
# xs /= np.std(xs)
# ys /= np.std(ys)
# zs /= np.std(zs)
print(xs.shape)
lim = 2000
val = xref1[:lim]
idx = manifolder.IDX[:lim]
plt.figure(figsize=[15, 3])
plt.plot(xref1[:lim], color='black', label='Timeseries')
# plt.plot(xs[:lim], linewidth=.5, label='$\psi_0$')
# plt.plot(ys[:lim], linewidth=.5, label='$\psi_1$')
# plt.plot(zs[:lim], linewidth=.5, label='$\psi_2$')
plt.plot(xs[:lim], linewidth=.5, label='psi_0')
plt.plot(ys[:lim], linewidth=.5, label='psi_1')
plt.plot(zs[:lim], linewidth=.5, label='psi_2')
plt.plot(idx / np.max(idx) + 1, linewidth=.8, label='IDX')
plt.legend()
# rightarrow causes an image error, when displayed in github!
# plt.xlabel('Time $ \\rightarrow $')
plt.xlabel('Time')
plt.ylabel('Value')
# plt.gca().autoscale(enable=True, axis='both', tight=None )
# plt.gca().xaxis.set_ticklabels([])
# plt.gca().yaxis.set_ticklabels([])
plt.title('Example Timeseries and Manifold Projection')
print('done')
###
### additional parsing, for color graphs
###
import matplotlib
cmap = matplotlib.cm.get_cmap('Spectral')
r = xs[:lim]
g = ys[:lim]
b = zs[:lim]
# prevent the jump in data value
r[:H] = r[H]  # use the notebook-level window length H rather than self.H
g[:H] = g[H]
b[:H] = b[H]
r -= np.min(r)
r /= np.max(r)
g -= np.min(g)
g /= np.max(g)
b -= np.min(b)
b /= np.max(b)
plt.figure(figsize=[15, 3])
for i in range(lim - 1):
col = [r[i], g[i], b[i]]
plt.plot([i, i + 1], [val[i], val[i + 1]], color=col)
plt.title('data, colored according to Psi (color three-vector)')
plt.xlabel('Time')
plt.ylabel('Value')
plt.show()
# clustering data ...
IDX = manifolder.IDX
cluster_lens = mh.count_cluster_lengths(IDX)
# cluster_lens is a dictionary, where each key is the cluster number (0:6),
# and the values are a list of cluster lengths
mh.show_cluster_lens(cluster_lens)
###Output
_____no_output_____
###Markdown
Graph Transition (Markov) Matrix. The system can be thought of as being in one particular "state" (cluster value) at any given time. This state $S$ can be thought of as a column vector with $C$ dimensions, similar to state vectors in quantum mechanics. Time evolution is then given by the transition matrix $M$, which is a Markov matrix (all columns sum to one, to preserve probability). In this case, we have $$S_{n+1} = M @ S_n$$ where the $@$ symbol is used to explicitly denote matrix multiplication. Since most clusters transition to themselves, the diagonal values of the matrix can be quite high, and they are typically removed. Thus, for visualization, we remove the diagonal elements of the matrix.
###Code
# in this case, index goes from 0 to 6 ...
# can also have outlier groups in kmeans, need to check for this
print(IDX.shape)
print(np.min(IDX))
print(np.max(IDX))
IDX_max = np.max(IDX)
M = mh.make_transition_matrix(IDX)
print('\n transition matrix:')
print(M)
# reorder transition matrix, from most to least common cluster
# diagonal elements monotonically decreasing
IDX_ordered = mh.reorder_cluster(IDX, M)
M = mh.make_transition_matrix(IDX_ordered)
print('\n transition matrix, ordered:')
print(M)
mh.image_M(M)
# remove diagonal, and make markov, for display
print('transition matrix, diagonal elements removed, normalized (Markov)')
np.fill_diagonal(M, 0) # happens inplace
M = mh.make_matrix_markov(M)
print(M)
mh.image_M(M, 1)
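# Illustrative toy example (not part of the original notebook): with a hand-built
# 2-state Markov matrix whose columns each sum to one, the state evolves as
# S_{n+1} = M @ S_n and total probability is preserved.
import numpy as np
M_toy = np.array([[0.9, 0.2],
                  [0.1, 0.8]])
S0 = np.array([1.0, 0.0])                   # start fully in state 0
S3 = np.linalg.matrix_power(M_toy, 3) @ S0  # three time steps
print(S3, S3.sum())                         # the probabilities still sum to 1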
###Output
_____no_output_____ |
notebooks/Text Mining Part 2.ipynb | ###Markdown
Natural Language Processing -- Part 2. Now that we have some basics of text processing, and retrieving basic information such as named entities and part-of-speech tags, we can look into more advanced modelling for information retrieval. To be able to use modelling to extract meaning and information from text, you need a numerical representation of your texts. So we transform our words to vectors. One hot encoding: The simplest way of making a vector representation is by using one-hot encoding. In one-hot encoding you give a number to each unique word you have in your corpus. For example, let's say my entire dataset consists of these two sentences:
###Code
# imports used throughout this notebook (not imported elsewhere in it)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sentences = ["We will stick to natural language processing in this class",
"Bert found a stick in the forest"]
words = [w.lower() for sentence in sentences for w in sentence.split(' ')]
words = list(np.unique(words))
words.sort()
print(len(words),'unique words')
words
# I can give each of these words a number
word_index ={}
for i, w in enumerate(words):
word_index[w] = i
print(w, ':', i)
###Output
a : 0
bert : 1
class : 2
forest : 3
found : 4
in : 5
language : 6
natural : 7
processing : 8
stick : 9
the : 10
this : 11
to : 12
we : 13
will : 14
###Markdown
Each of these words now has a one-hot encoded representation: a vector of length 15 (one position per unique word) that is all zeros except for a single 1 at the index of the word. So, Bert would have:
###Code
v_bert = np.zeros((len(words),)).astype(int)
v_bert[word_index['bert']] = 1
v_bert
###Output
_____no_output_____
###Markdown
And for language it would be:
###Code
v_lang = np.zeros((len(words),)).astype(int)
v_lang[word_index['language']] = 1
v_lang
###Output
_____no_output_____
###Markdown
We can also use this method to create a representation of our sentences.
###Code
vectors = []
for sentence in sentences:
vector = np.zeros((len(words), )).astype(int)
idx = [word_index[w.lower()] for w in sentence.split(' ')]
vector[idx] = 1
vectors.append(vector)
print(sentence)
print('\t', vector)
print()
###Output
We will stick to natural language processing in this class
[0 0 1 0 0 1 1 1 1 1 0 1 1 1 1]
Bert found a stick in the forest
[1 1 0 1 1 1 0 0 0 1 1 0 0 0 0]
###Markdown
Stacking these into a matrix gives a document term count. The 'documents' are then the sentences. This is called a **Document Term Matrix** (DTM) or **Document Term Count** (DTC).
###Code
np.stack(vectors)
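# Illustrative aside (not part of the original notebook): labelling the stacked
# matrix with the vocabulary makes the document-term structure easier to read.
import numpy as np
import pandas as pd
pd.DataFrame(np.stack(vectors), columns=words)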
###Output
_____no_output_____
###Markdown
Obviously this is a small dataset, so the 'matrix' is tiny. In addition, the name *Document Term Count* implies that it shows a count of the number of words. Since our example sentences (or documents) only have unique words in them, the count is always one. So let's look at a larger dataset. Luckily, we can use different pre-existing python packages to create a document term matrix, without doing all the manual steps like we did above. For this example I will be using scikit-learn (sklearn) and nltk.
###Code
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.datasets import fetch_20newsgroups
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords
# Grab an example dataset of old newsgroups
newsgroups = fetch_20newsgroups()
print(newsgroups.data[0])
print(f"There are {len(newsgroups.data)} newsgroup postings in this dataset")
###Output
From: [email protected] (where's my thing)
Subject: WHAT car is this!?
Nntp-Posting-Host: rac3.wam.umd.edu
Organization: University of Maryland, College Park
Lines: 15
I was wondering if anyone out there could enlighten me on this car I saw
the other day. It was a 2-door sports car, looked to be from the late 60s/
early 70s. It was called a Bricklin. The doors were really small. In addition,
the front bumper was separate from the rest of the body. This is
all I know. If anyone can tellme a model name, engine specs, years
of production, where this car is made, history, or whatever info you
have on this funky looking car, please e-mail.
Thanks,
- IL
---- brought to you by your neighborhood Lerxst ----
There are 11314 newsgroup postings in this dataset
###Markdown
We now use the `CountVectorizer` from sklearn to create a *DTC*. To ensure that we don't create a giant matrix, and that we only keep relevant words, we use some techniques from the previous section as preprocessing steps: - We filter out stopwords - We require each word to appear in at least 2 documents (min_df=2) - We drop words that appear in more than 90% of all documents (max_df=0.9) - We stem our words to combine different conjugations
###Code
# Define the stemmer
stemmer = SnowballStemmer(language='english')
# Define a list of stopwords. Note we need to stem these as well!
input_stopwords = [stemmer.stem(w) for w in stopwords.words('english')]
# Define the vectorizer
vectorizer = CountVectorizer(preprocessor=stemmer.stem, stop_words=input_stopwords, min_df=2,
max_df=0.9,
token_pattern=r"[a-zA-Z]{2,}")
dtc = vectorizer.fit_transform(newsgroups.data)
print(len(newsgroups.data))
dtc.shape
vectorizer.get_feature_names()[:20]
doc_nr = 0
words = np.array(vectorizer.get_feature_names())[dtc.toarray()[doc_nr, :]>1]
counts = dtc.toarray()[doc_nr, :][dtc.toarray()[doc_nr, :]>1]
for i, word in enumerate(words):
print(word, counts[i])
newsgroups.data[doc_nr]
from sklearn.metrics.pairwise import cosine_similarity
similar = cosine_similarity(dtc)
sim_dtc = similar[0, :].argsort()[::-1][:20]
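# Illustrative check (not part of the original notebook): cosine similarity is the
# dot product of two vectors divided by the product of their norms.
import numpy as np
a, b = np.array([1, 0, 2]), np.array([2, 1, 2])
print(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))  # the value cosine_similarity([a], [b]) would give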
df_dtc = pd.DataFrame(dtc[sim_dtc, :].toarray())
df_dtc[df_dtc==0] = float('nan')
df_dtc = df_dtc.dropna(axis=1, how='all')
df_dtc.columns = np.array(vectorizer.get_feature_names())[df_dtc.columns.values]
df_dtc.index = sim_dtc
most_common = df_dtc.sum(0).sort_values(ascending=False)[:20].index.values
df_dtc = df_dtc.loc[:, most_common]
import matplotlib.pyplot as plt
plt.figure(figsize=(20,8))
sns.clustermap(df_dtc.fillna(0).T, cmap='Blues')
###Output
_____no_output_____
###Markdown
TFIDF. Tf-idf stands for term frequency-inverse document frequency, and the tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus. (Source: tfidf.com)
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
# Define the stemmer
stemmer = SnowballStemmer(language='english')
# Define a list of stopwords. Note we need to stem these as well!
input_stopwords = [stemmer.stem(w) for w in stopwords.words('english')]
# Define the vectorizer
vectorizer = TfidfVectorizer(preprocessor=stemmer.stem, stop_words=input_stopwords, min_df=2,
max_df=0.9,
token_pattern=r"[a-zA-Z]{2,}")
tfidf = vectorizer.fit_transform(newsgroups.data)
print(len(newsgroups.data))
tfidf.shape
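# Illustrative toy calculation (not part of the original notebook): a classic
# textbook tf-idf weighting is tf * log(N / df); sklearn's TfidfVectorizer uses a
# smoothed, L2-normalised variant, so its exact values differ.
import numpy as np
tf, N, df = 3, 1000, 10        # assumed toy numbers: term count, corpus size, document frequency
print(tf * np.log(N / df))     # a term that is rare in the corpus gets a high weight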
similar = cosine_similarity(tfidf)
sim_tfidf = similar[0, :].argsort()[::-1][:20]
df_tfidf = pd.DataFrame(tfidf[sim_tfidf, :].toarray())
df_tfidf[df_tfidf==0] = float('nan')
df_tfidf = df_tfidf.dropna(axis=1, how='all')
df_tfidf.columns = np.array(vectorizer.get_feature_names())[df_tfidf.columns.values]
df_tfidf.index = sim_tfidf
most_common = df_tfidf.sum(0).sort_values(ascending=False)[:20].index.values
df_tfidf = df_tfidf.loc[:, most_common]
plt.figure(figsize=(20,8))
sns.clustermap(df_tfidf.fillna(0).T, cmap='Blues')
###Output
_____no_output_____
###Markdown
Train a classifier using TFIDF
###Code
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
X = newsgroups.data
y = newsgroups.target
# Split into train and test set
X_train_text, X_test_text, y_train, y_test = train_test_split(X, y)
vectorizer = TfidfVectorizer(preprocessor=stemmer.stem, stop_words=input_stopwords, min_df=2,
max_df=0.9,
token_pattern=r"[a-zA-Z]{2,}")
X_train = vectorizer.fit_transform(X_train_text)
X_test = vectorizer.transform(X_test_text)
# Train classifier
model = RandomForestClassifier()
model.fit(X_train, y_train)
# Predict on test class
y_pred = model.predict(X_test)
y_proba = model.predict_proba(X_test)
# Show scores
print("accuracy", (y_pred == y_test).mean())
print('F1', f1_score(y_test, y_pred, average='weighted'))
print('recall', recall_score(y_test, y_pred, average='weighted'))
print('precision', precision_score(y_test, y_pred, average='weighted'))
###Output
accuracy 0.8469423824673029
F1 0.8454934852767634
recall 0.8469423824673029
precision 0.8536500645165409
###Markdown
Plot the confusion matrix
###Code
mat = confusion_matrix(y_test, y_pred) * (np.identity(len(newsgroups.target_names))!=1).astype(int)
mat = pd.DataFrame(data=mat, columns=newsgroups.target_names, index=newsgroups.target_names)
plt.figure(figsize=(15, 10))
sns.heatmap(mat, cmap='Blues')
plt.xlabel("Prediction", fontsize=20);
plt.ylabel("True Label", fontsize=20);
###Output
_____no_output_____
###Markdown
We can also plot the vectors in a 2-D space
###Code
from sklearn.manifold import TSNE
target_names = {i:k for i, k in enumerate(newsgroups.target_names)}
x_plot = TSNE().fit_transform(X_test.toarray())  # densify; default (barnes-hut) t-SNE does not accept sparse input
df_v = pd.DataFrame(x_plot, columns=['x','y'])
df_v['target'] = [target_names.get(x) for x in y_test]
df_v['pred'] = [target_names.get(x) for x in y_pred]
df_v.head()
plt.figure(figsize=(20,10))
plt.subplot(121)
sns.scatterplot(data=df_v.sort_values('pred'), x='x', y='y', hue='pred')
plt.title('Predictions');
plt.xticks([]); plt.yticks([]);
plt.subplot(122)
sns.scatterplot(data=df_v.sort_values('target'), x='x', y='y', hue='target', legend=None)
plt.title('Labels');
plt.xticks([]); plt.yticks([]);
###Output
_____no_output_____
###Markdown
Word 2 vec
###Code
print(sentences)
vectors
###Output
['We will stick to natural language processing in this class', 'Bert found a stick in the forest']
###Markdown
Source: https://mc.ai/deep-nlp-word-vectors-with-word2vec/ There are two types of W2V models: CBOW (Continuous Bag of Words) and Skip-gram. CBOW: the CBOW method tries to predict the target word based on the context. **We will stick to Natural language processing in this class** Average of context words: ('we', 'will', 'to', 'natural') -> 'stick'. **CBOW is faster and has slightly better accuracy for frequent words.** Skip-gram: the skip-gram method tries to predict the context based on the input word. **We will stick to Natural language processing in this class** Word 'stick' -> ('we', 'will', 'to', 'natural'). **Skip-gram works well with small datasets, and has better representation for rare words or phrases.** Training a w2v model: a w2v model can easily be trained using the gensim package.
###Code
from nltk.tokenize import PunktSentenceTokenizer, RegexpTokenizer
from nltk.stem import SnowballStemmer
from gensim.models import Word2Vec
stem = SnowballStemmer('english')
sentok = PunktSentenceTokenizer()
tok = RegexpTokenizer(r"[a-zA-Z]{2,}")
sentences = [sentok.tokenize(x.lower()) for x in newsgroups.data]
sentences = [tok.tokenize(s) for sent in sentences for s in sent]
stemmed = [[stem.stem(s) for s in sent] for sent in sentences]
model = Word2Vec(sentences, min_count=5, sg=0, window=5)
model.wv.most_similar('computers', topn=10)
model = Word2Vec(stemmed, min_count=5, sg=0, window=5)
model.wv.most_similar('comput', topn=10)
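# Illustrative aside (not part of the original notebook): gensim selects the
# architecture with the sg flag -- sg=0 is CBOW (used above), sg=1 is skip-gram.
model_sg = Word2Vec(stemmed, min_count=5, sg=1, window=5)
model_sg.wv.most_similar(stem.stem('computer'), topn=10)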
from sklearn.decomposition import PCA
words = ['research', 'space', 'medical']
N = 10
un_words = []
basew = []
for word in words:
un_words.extend([x[0] for x in model.wv.most_similar(stem.stem(word), topn=N)])
un_words.append(word)
basew.extend([word for n in range(N+1)])
X = PCA(2).fit_transform(np.stack([model.wv[stem.stem(w)] for w in un_words]))
df = pd.DataFrame(X, columns=['x', 'y'])
df['word'] = un_words
df['baseword'] = basew
plt.figure(figsize=(15, 8))
sns.scatterplot(data=df, x='x', y='y', hue='baseword', palette='Set2')
for row in df.itertuples():
plt.annotate(row.word, (row.x, row.y))
###Output
_____no_output_____
###Markdown
Topic modelling. There are different ways of obtaining topics from a corpus. - **Supervised** - Classification: like the tfidf example we saw above. However, this requires you to know the topics beforehand and train a classifier to recognize them. - **Semi-supervised** - Query: using a vectorizer like the tfidf method, you can use similarity metrics to find documents close to a query. - **Unsupervised** - LDA: unsupervised methods use probabilistic calculations to find common topics in text. LDA source: https://medium.com/@lettier/how-does-lda-work-ill-explain-using-emoji-108abf40fa7d There are two main hyperparameters that are important in LDA. **The alpha** controls how many topics are in one document: > Turn it *down*, and the documents are more likely to have one or few topics. > Turn it *up*, and the documents will likely have more of a mixture of topics. **The beta** (or eta in gensim) hyperparameter controls the distribution of words per topic: > Turn it *down*, and the topics will likely have fewer words. > Turn it *up*, and the topics will likely have more words.
###Code
from sklearn.decomposition import NMF
from gensim.models import LdaModel
from gensim.corpora import Dictionary
fake_documents = [['bread']*10,
['avocado']*10,
['bacon']*10,
[*['bacon']*10, *['avocado']*10, *['bread']*10]]
dictionary = Dictionary(fake_documents)
corpus = [dictionary.doc2bow(doc) for doc in fake_documents]
lda = LdaModel(corpus, num_topics=3, id2word=dictionary, passes=100, alpha=.33, eta=.01)
print(lda.print_topics(num_words=1),'\n')
for i, doc in enumerate(corpus):
t1, t2, t3 = lda.get_document_topics(doc)
print('Document', i)
print(dictionary.id2token[t1[0]], np.round(t1[1], 2),
dictionary.id2token[t2[0]], np.round(t2[1], 2),
dictionary.id2token[t3[0]], np.round(t3[1], 2))
###Output
[(0, '0.500*"bread"'), (1, '0.999*"bacon"'), (2, '0.333*"bacon"')]
Document 0
bread 0.94 avocado 0.03 bacon 0.03
Document 1
bread 0.94 avocado 0.03 bacon 0.03
Document 2
bread 0.03 avocado 0.94 bacon 0.03
Document 3
bread 0.66 avocado 0.33 bacon 0.01
###Markdown
Let's try and run this on the newsgroups dataset
###Code
# Tokenize the text
tok = RegexpTokenizer(r"[a-zA-Z]{3,}")
documents = [tok.tokenize(text.lower()) for text in newsgroups.data]
#check to see we're keeping the right shape
assert len(documents) == len(newsgroups.data)
dictionary = Dictionary(documents)
dictionary.filter_extremes(no_above=.1, no_below=150)
stopword_ids = [dictionary.token2id.get(w, None) for w in input_stopwords]
dictionary.filter_tokens(bad_ids=stopword_ids)
corpus = [dictionary.doc2bow(doc) for doc in documents]
lda = LdaModel(corpus, id2word=dictionary, num_topics = len(target_names), passes=10, chunksize=200,
alpha=.4, eta=.8)
for i in range(20):
terms = np.array(lda.get_topic_terms(i, topn=8))
print(f'Topic {i}')
print('\t',' '.join([str(dictionary.id2token[t[0]]) + '('+str(np.round(t[1], 2))+')' for t in terms]))
pred = []
for i, doc in enumerate(corpus):
pred.append(pd.DataFrame(lda.get_document_topics(doc)).set_index(0).idxmax().values[0])
df = pd.DataFrame([pred, newsgroups.target]).T
df.columns = ['pred', 'label']
df['label'] = df['label'].replace(target_names)
df['count'] = 1
df.head()
x, y = np.unique(pred, return_counts=True)
x2, y2 = np.unique(newsgroups.target, return_counts=True)
plt.figure(figsize=(20,8))
sns.barplot(data=df, y="count", x='pred', hue='label', estimator=sum)
###Output
_____no_output_____ |
60_Registration_Introduction.ipynb | ###Markdown
Introduction to SimpleITKv4 Registration. SimpleITK conventions: dimensionality and pixel type of the registered images are required to be the same (2D/2D or 3D/3D); supported pixel types are sitkFloat32 and sitkFloat64 (use the SimpleITK Cast() function if your image's pixel type is something else). Registration Components: there are many options for creating an instance of the registration framework, all of which are configured in SimpleITK via methods of the ImageRegistrationMethod class. This class encapsulates many of the components available in ITK for constructing a registration instance. Currently, the available choices from the following groups of ITK components are: Optimizers - the SimpleITK registration framework supports several optimizer types via the SetOptimizerAsX() methods; these include: Exhaustive; Nelder-Mead downhill simplex, a.k.a. Amoeba; variations on gradient descent: GradientDescent, GradientDescentLineSearch, RegularStepGradientDescent, ConjugateGradientLineSearch; L-BFGS-B (Limited memory Broyden, Fletcher, Goldfarb, Shannon - Bound Constrained), which supports the use of simple constraints ($l\leq x \leq u$). Similarity metrics - the SimpleITK registration framework supports several metric types via the SetMetricAsX() methods; these include: MeanSquares, Demons, Correlation, ANTSNeighborhoodCorrelation, JointHistogramMutualInformation, MattesMutualInformation. Interpolators - the SimpleITK registration framework supports several interpolators via the SetInterpolator() method, which receives one of the following enumerations: sitkNearestNeighbor, sitkLinear, sitkBSpline, sitkGaussian, sitkHammingWindowedSinc, sitkCosineWindowedSinc, sitkWelchWindowedSinc, sitkLanczosWindowedSinc, sitkBlackmanWindowedSinc. Data - Retrospective Image Registration Evaluation: we will be using part of the training data from the Retrospective Image Registration Evaluation (RIRE) project.
###Code
import SimpleITK as sitk
# Utility method that either downloads data from the MIDAS repository or
# if already downloaded returns the file name for reading from disk (cached data).
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility callback functions for image display and for plotting the similarity metric during registration.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, fixed
from IPython.display import clear_output
# Callback invoked by the interact ipython method for scrolling through the image stacks of
# the two images (moving and fixed).
def display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa):
# Create a figure with two subplots and the specified size.
plt.subplots(1,2,figsize=(10,8))
# Draw the fixed image in the first subplot.
plt.subplot(1,2,1)
plt.imshow(fixed_npa[fixed_image_z,:,:],cmap=plt.cm.Greys_r);
plt.title('fixed image')
plt.axis('off')
# Draw the moving image in the second subplot.
plt.subplot(1,2,2)
plt.imshow(moving_npa[moving_image_z,:,:],cmap=plt.cm.Greys_r);
plt.title('moving image')
plt.axis('off')
plt.show()
# Callback invoked by the IPython interact method for scrolling and modifying the alpha blending
# of an image stack of two images that occupy the same physical space.
def display_images_with_alpha(image_z, alpha, fixed, moving):
img = (1.0 - alpha)*fixed[:,:,image_z] + alpha*moving[:,:,image_z]
plt.imshow(sitk.GetArrayFromImage(img),cmap=plt.cm.Greys_r);
plt.axis('off')
plt.show()
# Callback invoked when the StartEvent happens, sets up our new data.
def start_plot():
global metric_values, multires_iterations
metric_values = []
multires_iterations = []
# Callback invoked when the EndEvent happens, do cleanup of data and figure.
def end_plot():
global metric_values, multires_iterations
del metric_values
del multires_iterations
# Close figure, we don't want to get a duplicate of the plot latter on.
plt.close()
# Callback invoked when the IterationEvent happens, update our data and display new figure.
def plot_values(registration_method):
global metric_values, multires_iterations
metric_values.append(registration_method.GetMetricValue())
# Clear the output area (wait=True, to reduce flickering), and plot current data
clear_output(wait=True)
# Plot the similarity metric values
plt.plot(metric_values, 'r')
plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
plt.xlabel('Iteration Number',fontsize=12)
plt.ylabel('Metric Value',fontsize=12)
plt.show()
# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def update_multires_iterations():
global metric_values, multires_iterations
multires_iterations.append(len(metric_values))
###Output
_____no_output_____
###Markdown
Read images. We first read the images, casting the pixel type to that required for registration (Float32 or Float64), and look at them.
###Code
fixed_image = sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32)
moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32)
interact(display_images, fixed_image_z=(0,fixed_image.GetSize()[2]-1), moving_image_z=(0,moving_image.GetSize()[2]-1), fixed_npa = fixed(sitk.GetArrayFromImage(fixed_image)), moving_npa=fixed(sitk.GetArrayFromImage(moving_image)));
###Output
_____no_output_____
###Markdown
Initial Alignment. Use the CenteredTransformInitializer to align the centers of the two volumes and set the center of rotation to the center of the fixed image.
###Code
initial_transform = sitk.CenteredTransformInitializer(fixed_image,
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
moving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue())
interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
###Output
_____no_output_____
###Markdown
Registration. The specific registration task at hand estimates a 3D rigid transformation between images of different modalities. There are multiple components from each group (optimizers, similarity metrics, interpolators) that are appropriate for the task. Note that each component selection requires setting some parameter values. We have made the following choices: Similarity metric, mutual information (Mattes MI): number of histogram bins, 50; sampling strategy, random; sampling percentage, 1%. Interpolator: sitkLinear. Optimizer, gradient descent: learning rate (step size along the traversal direction in parameter space), 1.0; number of iterations (maximal number of iterations), 100; convergence minimum value (value used for convergence checking in conjunction with the energy profile of the similarity metric that is estimated in the given window size), 1e-6; convergence window size (number of values of the similarity metric which are used to estimate the energy profile of the similarity metric), 10. Perform registration using the settings given above, and take advantage of the built-in multi-resolution framework; use a three-tier pyramid. In this example we plot the similarity metric's value during registration. Note that the change of scales in the multi-resolution framework is readily visible.
###Code
registration_method = sitk.ImageRegistrationMethod()
# Similarity metric settings.
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
# Optimizer settings.
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Setup for the multi-resolution framework.
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
# Don't optimize in-place; we may want to run this cell multiple times.
registration_method.SetInitialTransform(initial_transform, inPlace=False)
# Connect all of the observers so that we can perform plotting during registration.
registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))
final_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
###Output
_____no_output_____
###Markdown
Post-registration analysis: Query the registration method to see the metric value and the reason the optimization terminated. The metric value allows us to compare multiple registration runs, since there is a probabilistic aspect to our registration: we are using random sampling to estimate the similarity metric. Always remember to query why the optimizer terminated. This will help you understand whether termination happened too early, either because the thresholds are too tight (a small number of iterations, numberOfIterations) or too loose (a large value for the minimal change in the similarity measure, convergenceMinimumValue).
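As a complement, a hedged sketch of what comparing several runs could look like: it simply re-executes the registration configured above a few times and collects the final metric values. The loop and the final_metrics name are illustrative additions, not part of the original notebook.
```python
# Illustrative sketch (not from the original notebook): because the metric is
# estimated with random sampling, repeated runs give slightly different values.
final_metrics = []
for _ in range(3):
    # start each run from the same initial transform
    registration_method.SetInitialTransform(initial_transform, inPlace=False)
    registration_method.Execute(fixed_image, moving_image)
    final_metrics.append(registration_method.GetMetricValue())
print(final_metrics)  # e.g. three similar, but not identical, metric values
```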
###Code
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
###Output
_____no_output_____
###Markdown
Now visually inspect the results.
###Code
moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue())
interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
###Output
_____no_output_____
###Markdown
If we are satisfied with the results, save them to file.
###Code
sitk.WriteImage(moving_resampled, os.path.join(OUTPUT_DIR, 'RIRE_training_001_mr_T1_resampled.mha'))
sitk.WriteTransform(final_transform, os.path.join(OUTPUT_DIR, 'RIRE_training_001_CT_2_mr_T1.tfm'))
###Output
_____no_output_____ |
01_BinarySearch.ipynb | ###Markdown
Binary Search Group Activity Today, we will play the guessing game. *Step 1.* Find your group mates1. Check the playing card I gave you. Your group mates for this activity are the students with cards that have the same value (e.g. all the students with a queen card form a group).1. Assign a role to each member 1. The game leader (1 student) 1. The record keeper (1 student) 1. Guesser (1 or more students)*Step 2.* Play the guessing game 4 times1. The game leader chooses a number between 1 and 100. This is the secret the other team members will try to guess.1. To keep everyone honest, the game leader writes the number on a piece of paper and hands it to the record keeper.1. Repeat until the secret number is guessed 1. Guessers say a number 1. The record keeper writes it down 1. The game leader says if the guessed number is too low, too high or just right.1. When the game ends, the record keeper records the number of guesses and the strategy the guessers used. Reflection1. What is the smallest number of guesses the guesser can make?2. What is the greatest number of guesses your strategy needs in the worst case?3. What number should you try first to eliminate the highest number of possibilities?4. What about the second time and third time? Code the Guessing Game in Python The program generates a random number between 1 and 100. The program asks the user to guess the number until the user guesses correctly. For each guess, the program prints a hint telling the guesser whether the guess is too high or too low.
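Before coding, note that the reflection questions have a quick numerical answer: if every guess halves the remaining range, at most ceil(log2(100)) = 7 guesses are needed in the worst case, and the most informative first guess is the midpoint of the range. The short sketch below is an illustration added here, not part of the original activity.
```python
# Illustration (not part of the original activity): worst-case number of guesses
# when the remaining range is halved on every guess, and the best first guess.
import math

low, high = 1, 100
print("Worst-case guesses for 1..100:", math.ceil(math.log2(high - low + 1)))  # 7
print("Most informative first guess:", (low + high) // 2)  # 50, the midpoint
```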
###Code
import random
# generate a random counting number between 1 and 100 inclusive
number = random.randint(1, 100)
# keep guessing forever
while(True):
# ask the player for a number
answer = int(input("What number am I thinking of?"))
# TODO: write the conditions of the if statements
if ( True ):
print("Too low!")
elif ( True ):
print("Too high!")
else:
print("Well done! You guessed my number!")
# exits the loop
break
###Output
_____no_output_____
###Markdown
Code the search in Python The roles are changed. Now, the human thinks of a number and the computer guesses it.
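If you want to check your work after attempting the TODOs below, one possible completion is sketched here. It assumes the intended strategy is bisection, and the function guess_number and its automatic play against a known secret are illustrative additions, not part of the original exercise.
```python
# Illustrative reference (assumes the intended strategy is bisection):
# the computer guesses the midpoint and discards half of the range each time.
def guess_number(secret, low=1, high=100):
    guesses = 0
    while True:
        guess = (low + high) // 2  # best choice between low and high
        guesses += 1
        if guess < secret:
            low = guess + 1   # "Too low!": keep only the upper half
        elif guess > secret:
            high = guess - 1  # "Too high!": keep only the lower half
        else:
            return guesses

# No secret between 1 and 100 needs more than 7 guesses.
print(max(guess_number(s) for s in range(1, 101)))
```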
###Code
# the current known lower limit for the guess
low = 1
# the current known upper limit for the guess
high = 100
while (True):
print(low, high)
# Set the guess to the best choice between low and high. Hint: you may have to use the operator //
# TODO: calculate the guess
guess = 0
print("Is", guess, "your number?")
print("1. Too high!")
print("2. Too low!")
print("3. Well done!")
selection = int(input("Introduce your selection: "))
print("selection", selection)
if ( selection == 1):
# TODO: update high
high = 0
elif ( selection == 2):
# TODO: update low
low = 0
elif ( selection == 3 ):
break
###Output
_____no_output_____ |
m03_data_visualization/m03_c02_imperative_visualization/m03_c02_imperative_visualization.ipynb | ###Markdown
MAT281 Aplicaciones de la Matemática en la Ingeniería, Module 03, Class 02: Imperative Visualization
Objectives
* Understand the imperative visualization style.
* Apply charts appropriate to the data at hand.
Contents
* [Imperative Visualization](imperative)
* [Matplotlib](matplotlib)
* [Plot by Plot](plot-plot)
 - [Bar Chart](barplot)
 - [Line Plot](lineplot)
 - [Scatter Plot](scatter-plot)
 - [Error Bar Plot](error-bar-plot)
 - [Contour Plot](countour-plot)
 - [Vector Field](vector-field)
Imperative Visualization
This paradigm focuses on the instructions given, since it does not abstract away the visual operations or encodings. Some of its characteristics are:
* You specify _how_ something should be done.
* The plotting steps must be specified manually.
* Specification and execution are interleaved.
Colloquially, it can be understood as having to decide, pixel by pixel, what you want to show.
Matplotlib
The best way to understand a library is the explanation its own community provides; below is an excerpt from the matplotlib [web page](https://matplotlib.org):
_Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits._
_Matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc., with just a few lines of code. For examples, see the sample plots and thumbnail gallery._
_For simple plotting the pyplot module provides a MATLAB-like interface, particularly when combined with IPython. For the power user, you have full control of line styles, font properties, axes properties, etc, via an object oriented interface or via a set of functions familiar to MATLAB users._
General Concepts
Plotting requires action at a range of levels, from the most general (e.g. 'contour this 2D array') to the most specific (e.g. 'color this pixel red'). The purpose of this library is to generate plots as quickly and easily as possible (with high-level code), while keeping the ability to use low-level code when necessary.
Everything in matplotlib is therefore organized in a hierarchy. At the top sits the `matplotlib.pyplot` module. At this level, simple functions are used to add plot elements (lines, images, text, etc.) to the current axes in the current figure. The next level in the hierarchy is the first level of the object-oriented interface, in which pyplot is used only for a few functions, such as figure creation, and the user explicitly creates and keeps track of the figure and axes objects. At this level, the user uses pyplot to create figures, and through those figures one or more axes objects can be created. These axes objects are then used for most plotting actions.
Figure
This is the complete visualization. The _Figure_ keeps track of all its child _Axes_ and the _Canvas_.
A figure can have any number of _Axes_, but to be useful it should have at least one. The easiest way to create a new _Figure_ is with pyplot:
```python
fig = plt.figure()  # an empty figure with no axes
fig, ax_lst = plt.subplots(2, 2)  # a figure with a 2x2 grid of Axes
```
Axes
This is what can be thought of as 'a plot': the region of the image that holds the data space. A given _Figure_ can contain many _Axes_, but a given _Axes_ object can only belong to one _Figure_. An _Axes_ contains two (or three, in the 3D case) _Axis_ objects that take care of the data limits. Each _Axes_ has a title, a label for the horizontal axis, and a label for the vertical axis. The _Axes_ class and its functions are the main entry point for working with the object-oriented interface.
Axis
These are the axes themselves, something like straight lines. They take care of setting the plot limits and generating the ticks (the marks on the axis) and the ticklabels (the _strings_ that label the ticks).
Plot by Plot
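As a first warm-up, here is a minimal sketch (added here as an illustration, not part of the original lecture) of the Figure/Axes/Axis anatomy just described, written with the object-oriented interface:
```python
# Minimal sketch of the object-oriented interface: one Figure, one Axes,
# with the title and axis labels all managed through the Axes object.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(6, 4))    # a Figure containing a single Axes
x = np.linspace(0, 2 * np.pi, 100)
ax.plot(x, np.sin(x), label="sin(x)")
ax.set_title("Anatomy of a simple plot")  # the title belongs to the Axes
ax.set_xlabel("x")                        # horizontal Axis label
ax.set_ylabel("y")                        # vertical Axis label
ax.legend()
plt.show()
```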
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
%matplotlib inline
###Output
_____no_output_____
###Markdown
Bar Chart
###Code
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# Another way without axes
fig = plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.barh(y_pos, performance, xerr=error, align='center', color="g", alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance')
plt.subplot(1, 2, 2)
plt.bar(y_pos, performance, yerr=error, align='center', color="g", alpha=0.6)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.ylabel('Performance')
plt.show()
###Output
_____no_output_____
###Markdown
When should a bar chart be used?
* x: must be nominal or ordinal data.
* y: must be ordinal, positional, or quantitative data.
Avoid: nominal vs. nominal charts.
Line Plot
###Code
def f(t):
return np.exp(-t) * np.cos(2 * np.pi * t)
t1 = np.arange(0.0, 5.0, 0.5)
t2 = np.arange(0.0, 5.0, 0.02)
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 10), sharex=True, sharey=True)
ax1.plot(t1, f(t1), "b")
ax1.grid()
ax1.set_title("arange step = 0.5")
ax2.plot(t2, f(t2), "b")
ax2.grid()
ax2.set_title("arange step = 0.02")
fig.show()
###Output
_____no_output_____
###Markdown
When should a line plot be used?
* x: must be ordinal or quantitative data.
* y: must be ordinal, positional, or quantitative data.
Scatter Plot
###Code
N = 100
r0 = 0.6
x = 0.9 * np.random.rand(N)
y = 0.9 * np.random.rand(N)
area = np.pi * (10 * np.random.rand(N)) ** 2 # 0 to 10 point radiuses
c = np.sqrt(area)
r = np.sqrt(x ** 2 + y ** 2)
cm1 = plt.cm.get_cmap('RdYlBu')
cm2 = plt.cm.get_cmap('Greys')
plt.figure(figsize=(20, 12))
area1 = np.ma.masked_where(r < r0, area)
area2 = np.ma.masked_where(r >= r0, area)
sc1 = plt.scatter(x, y, s=area1, marker='^', c=c, cmap=cm1)
plt.colorbar(sc1)
sc2 = plt.scatter(x, y, s=area2, marker='o', c=c, cmap=cm2)
plt.colorbar(sc2)
# Show the boundary between the regions:
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta), "k:", lw=2.0)
plt.show()
###Output
_____no_output_____
###Markdown
When should a scatter plot be used?
* x: positional or quantitative data.
* y: positional or quantitative data.
* z: nominal or ordinal data (optional)
***NOTE***: if there are only a few points, positional or quantitative data can also be used for z.
Error Bar Plot
###Code
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
x_error = 0.1 + 0.2*np.random.rand(len(x))
ax1.errorbar(x, y, xerr=x_error)
y_error = 0.1 + 0.2*np.random.rand(len(x))
ax2.errorbar(x, y, yerr=y_error)
fig.show()
###Output
_____no_output_____
###Markdown
When should an error bar plot be used?
* x: positional or quantitative data.
* y: positional or quantitative data.
* z: positional or quantitative data.
The z values must have the same units as y.
Contour Plot
###Code
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
pos,
mean=[-1.0, -1.0],
cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
pos,
mean=[1.0, 1.0],
cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
fig, axs = plt.subplots(ncols=2, figsize=(20, 10), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
{"colors": "k", "linestyles": "solid"},
{"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
cs = ax.contour(x, y, z, 11, **countour_styles[i])
if i > 0:
ax.clabel(cs, fontsize=9, inline=1)
ax.grid(alpha=0.5)
fig.show()
###Output
_____no_output_____
###Markdown
When should a contour plot be used?
* x: positional or quantitative data.
* y: positional or quantitative data.
* z: positional or quantitative data.
***NOTE***: the points must be dense/regular enough to obtain level surfaces.
Vector Fields
Why is a vector field plot called a quiver in English?
###Code
def my_vector_field():
"""
You can even define a new function.
"""
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
U = np.cos(X)
V = np.sin(Y)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
Q1 = ax1.quiver(U, V)
qk1 = ax1.quiverkey(
Q1,
0.5,
0.92,
2,
r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'}
)
Q2 = ax2.quiver(
X[::3, ::3],
Y[::3, ::3],
U[::3, ::3],
V[::3, ::3],
pivot='mid',
color='r',
units='inches'
)
qk2 = ax2.quiverkey(
Q2,
0.5,
0.03,
1,
r'$1 \frac{m}{s}$',
fontproperties={'weight': 'bold'}
)
ax2.plot(X[::3, ::3], Y[::3, ::3], 'k.')
ax2.set_title("pivot='mid'; every third arrow; units='inches'")
fig.show()
my_vector_field()
###Output
_____no_output_____
###Markdown
When should a vector field plot be used?
* x: must be positional or quantitative data.
* y: must be positional or quantitative data.
* z: the slope must be positional or quantitative data.
Avoid: vector field plots when the corresponding interpretation is not possible.
The limit is your imagination
You can find countless examples in the Matplotlib Gallery ([link](https://matplotlib.org/3.1.1/gallery/index.html)).
Heatmaps
###Code
from mpl_heatmap import heatmap, annotate_heatmap
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
"Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
[2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
[1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
[0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
[0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
[1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
fig, ax = plt.subplots(figsize=(10, 10))
im, cbar = heatmap(harvest, vegetables, farmers, ax=ax,
cmap="YlGn", cbarlabel="harvest [t/year]")
texts = annotate_heatmap(im, valfmt="{x:.1f} t")
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Mandelbrot Set
###Code
def mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon=2.0):
X = np.linspace(xmin, xmax, xn).astype(np.float32)
Y = np.linspace(ymin, ymax, yn).astype(np.float32)
C = X + Y[:, None] * 1j
N = np.zeros_like(C, dtype=int)
Z = np.zeros_like(C)
for n in range(maxiter):
I = abs(Z) < horizon
N[I] = n
Z[I] = Z[I]**2 + C[I]
N[N == maxiter-1] = 0
return Z, N
import time
import matplotlib
from matplotlib import colors
xmin, xmax, xn = -2.25, +0.75, 3000 // 2
ymin, ymax, yn = -1.25, +1.25, 2500 // 2
maxiter = 200
horizon = 2.0 ** 40
log_horizon = np.log2(np.log(horizon))
Z, N = mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon)
# Normalized recount as explained in:
# https://linas.org/art-gallery/escape/smooth.html
# https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift
# This line will generate warnings for null values but it is faster to
# process them afterwards using the nan_to_num
with np.errstate(invalid='ignore'):
M = np.nan_to_num(N + 1 - np.log2(np.log(abs(Z))) + log_horizon)
dpi = 72
width = 10
height = 10*yn/xn
fig = plt.figure(figsize=(width, height), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
# Shaded rendering
light = colors.LightSource(azdeg=315, altdeg=10)
M = light.shade(M, cmap=plt.cm.hot, vert_exag=1.5,
norm=colors.PowerNorm(0.3), blend_mode='hsv')
ax.imshow(M, extent=[xmin, xmax, ymin, ymax], interpolation="bicubic")
ax.set_xticks([])
ax.set_yticks([])
# Some advertisement for matplotlib
year = time.strftime("%Y")
text = ("The Mandelbrot fractal set\n"
"Rendered with matplotlib %s, %s - http://matplotlib.org"
% (matplotlib.__version__, year))
ax.text(xmin+.025, ymin+.025, text, color="white", fontsize=12, alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
MAT281 Aplicaciones de la Matemática en la Ingeniería Módulo 03 Clase 02: Visualización Imperativa Objetivos* Comprender el estilo de visualización imperativa.* Aplicar gráficos adecuados dependiendo de los datos. Contenidos* [Visualización Imperativa](imperative)* [Matplotlib](matplotlib)* [Gráfico a Gráfico](plot-plot) - [Gráfico de Barras](barplot) - [Gráfico de Líneas](lineplot) - [Scatter Plot](scatter-plot) - [Gráfico de Barras de Error](error-bar-plot) - [Countour Plot](countour-plot) - [Campo de Vectores](vector-field) Visualización ImperativaEste paradigma se focaliza en las instrucciones recibidas, ya que no abstrae las operaciones o codificaciones visuales. Algunas de sus características son:* Se especifica _Cómo_ se debe hacer algo.* Se deben especificar manualmente los pasos del trazado.* Especificación y ejecución entrelazadas.Coloquialmente se puede entender como que se debe decidir pixel a pixel lo que se desea mostrar. MatplotlibLo mejor para entender una librería es la explicación que entrega la misma comunidad, a continuación un extracto de la [página web](https://matplotlib.org) de matplotlib:_Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits.__Matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc., with just a few lines of code. For examples, see the sample plots and thumbnail gallery.__For simple plotting the pyplot module provides a MATLAB-like interface, particularly when combined with IPython. For the power user, you have full control of line styles, font properties, axes properties, etc, via an object oriented interface or via a set of functions familiar to MATLAB users._ Conceptos GeneralesEl trazado requiere acción en un rango de niveles, desde el más general (por ejemplo, 'contornear este arreglo 2D') hasta el más específico (por ejemplo, 'colorear un píxel de color rojo'). El propósito de esta librería es generar gráficos de manera rápida lo más fácil posible (con código de alto nivel), pero con la capacidad de usar código de bajo nivel cuando sea necesario.Por lo tanto, todo en matplotlib está organizado en una jerarquía. En la parte superior se encuentra el módulo `matplotlib.pyplot`. En este nivel, se utilizan funciones simples para agregar elementos de trazado (líneas, imágenes, texto, etc.) a los ejes actuales en la figura actual. El siguiente nivel en la jerarquía es el primer nivel de la interfaz orientada a objetos, en la que pyplot se usa solo para algunas funciones, como la creación de figuras, y el usuario crea y realiza un seguimiento explícito de los objetos de figuras y ejes. En este nivel, el usuario usa pyplot para crear figuras, y a través de esas figuras, se pueden crear uno o más objetos de ejes. Estos objetos de ejes se utilizan para la mayoría de las acciones de trazado.  FigureEs la visualización completa. _Figure_ realiza un seguimiento de todos los _Axes_ hijos y el _Canvas_. 
Una figura puede tener cualquier número de _Axes_, pero para ser útil debe tener al menos uno.La forma más fácil de crear una nueva _Figure_ es con pyplot:```pythonfig = plt.figure() an empty figure with no axesfig, ax_lst = plt.subplots(2, 2) a figure with a 2x2 grid of Axes``` AxesEsto es lo que se puede pensar como 'un gráfico', es la región de la imagen con el espacio de datos. Un _Figure_ dada puede contener muchos _Axes_, pero un objeto _Axe_ dado solo puede estar en un _Figure_. _Axes_ contiene dos (o tres en el caso de 3D) objetos _Axis_ que se ocupan de los límites de datos. Cada _Axe_ tiene un título, una etiqueta para el eje horizonal y una etiqueta para el eje vertical.La clase _Axes_ y sus funciones son el punto de entrada principal para trabajar con la interfaz orientada a objetos. AxisCorresponden a los ejes, algo así como líneas rectas. Se encargan de establecer los límites del gráfico y generar los ticks (las marcas en el eje) y los ticklabels (_strings_ que etiquetan los ticks). Gráfico a Gráfico
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
%matplotlib inline
###Output
_____no_output_____
###Markdown
Gráfico de Barras
###Code
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# Another way without axes
fig = plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.barh(y_pos, performance, xerr=error, align='center', color="g", alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance')
plt.subplot(1, 2, 2)
plt.bar(y_pos, performance, yerr=error, align='center', color="g", alpha=0.6)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.ylabel('Performance')
plt.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de barras?* x: Debe ser datos del tipo nominal o ordinal.* y: Debe ser datos de tipo ordinal, posicional o cuantitativo.Evitar: gráfico de nominal vs nominal. Gráfico de Líneas
###Code
def f(t):
return np.exp(-t) * np.cos(2 * np.pi * t)
t1 = np.arange(0.0, 5.0, 0.5)
t2 = np.arange(0.0, 5.0, 0.02)
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 10), sharex=True, sharey=True)
ax1.plot(t1, f(t1), "b")
ax1.grid()
ax1.set_title("arange step = 0.5")
ax2.plot(t2, f(t2), "b")
ax2.grid()
ax2.set_title("arange step = 0.02")
fig.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de líneas?* x: Debe ser datos del tipo ordinal o cuantitativo.* y: Debe ser datos de tipo ordinal, posicional o cuantitativo. Scatter Plot
###Code
N = 100
r0 = 0.6
x = 0.9 * np.random.rand(N)
y = 0.9 * np.random.rand(N)
area = np.pi * (10 * np.random.rand(N)) ** 2 # 0 to 10 point radiuses
c = np.sqrt(area)
r = np.sqrt(x ** 2 + y ** 2)
cm1 = plt.cm.get_cmap('RdYlBu')
cm2 = plt.cm.get_cmap('Greys')
plt.figure(figsize=(20, 12))
area1 = np.ma.masked_where(r < r0, area)
area2 = np.ma.masked_where(r >= r0, area)
sc1 = plt.scatter(x, y, s=area1, marker='^', c=c, cmap=cm1)
plt.colorbar(sc1)
sc2 = plt.scatter(x, y, s=area2, marker='o', c=c, cmap=cm2)
plt.colorbar(sc2)
# Show the boundary between the regions:
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta), "k:", lw=2.0)
plt.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar scatter plot?* x: Dato del tipo posicional o cuantitativo.* y: Dato del tipo posicional o cuantitativo. * z: Dato del tipo nominal u ordinal (opcional) ***OBSERVACION***: Si hay pocos puntos, también puede usarse para z datos de tipo posicional o cuantitativo. Gráfico de Barra de Error
###Code
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
x_error = 0.1 + 0.2*np.random.rand(len(x))
ax1.errorbar(x, y, xerr=x_error)
y_error = 0.1 + 0.2*np.random.rand(len(x))
ax2.errorbar(x, y, yerr=y_error)
fig.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de barra de error?* x: Dato del tipo posicional o cuantitativo.* y: Dato del tipo posicional o cuantitativo. * z: Dato del tipo posicional o cuantitativo.Los valores de z tienen que tener las mismas unidades y. Countor Plot
###Code
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
pos,
mean=[-1.0, -1.0],
cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
pos,
mean=[1.0, 1.0],
cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
fig, axs = plt.subplots(ncols=2, figsize=(20, 10), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
{"colors": "k", "linestyles": "solid"},
{"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
cs = ax.contour(x, y, z, 11, **countour_styles[i])
if i > 0:
ax.clabel(cs, fontsize=9, inline=1)
ax.grid(alpha=0.5)
fig.show()
###Output
_____no_output_____
###Markdown
¿Cuándo se debe utiliar countour plot?* x: Dato del tipo posicional o cuantitativo.* y: Dato de tipo posicional o cuantitativo. * z: Dato de tipo posicional o cuantitativo.***OBSERVACION***: Se debe tener suficiente densidad/regularidad de puntos como para poder obtener superficies de nivel. Campos de Vectores¿Porqué se llama quiver al campo de vectores en inglés?
###Code
def my_vector_field():
"""
You can even define a new function.
"""
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
U = np.cos(X)
V = np.sin(Y)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
Q1 = ax1.quiver(U, V)
qk1 = ax1.quiverkey(
Q1,
0.5,
0.92,
2,
r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'}
)
Q2 = ax2.quiver(
X[::3, ::3],
Y[::3, ::3],
U[::3, ::3],
V[::3, ::3],
pivot='mid',
color='r',
units='inches'
)
qk2 = ax2.quiverkey(
Q2,
0.5,
0.03,
1,
r'$1 \frac{m}{s}$',
fontproperties={'weight': 'bold'}
)
ax2.plot(X[::3, ::3], Y[::3, ::3], 'k.')
ax2.set_title("pivot='mid'; every third arrow; units='inches'")
fig.show()
my_vector_field()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar campos de vectores?* x: Debe ser datos del tipo posicional o cuantitativo.* y: Debe ser datos de tipo posicional o cuantitativo. * z: Pendiente debe ser dato de tipo posicional o cuantitativo.Evitar: gráfico de campo de vectores si no es posible la interpretación correspondiente. El límite es la imaginaciónPuedes encontrar una infinidad de ejemplos en la Galería de Maplotlib ([link](https://matplotlib.org/3.1.1/gallery/index.html)). Mapas de Calor
###Code
from mpl_heatmap import heatmap, annotate_heatmap
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
"Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
[2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
[1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
[0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
[0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
[1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
fig, ax = plt.subplots(figsize=(10, 10))
im, cbar = heatmap(harvest, vegetables, farmers, ax=ax,
cmap="YlGn", cbarlabel="harvest [t/year]")
texts = annotate_heatmap(im, valfmt="{x:.1f} t")
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Conjunto de Mandelbrot
###Code
def mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon=2.0):
X = np.linspace(xmin, xmax, xn).astype(np.float32)
Y = np.linspace(ymin, ymax, yn).astype(np.float32)
C = X + Y[:, None] * 1j
N = np.zeros_like(C, dtype=int)
Z = np.zeros_like(C)
for n in range(maxiter):
I = abs(Z) < horizon
N[I] = n
Z[I] = Z[I]**2 + C[I]
N[N == maxiter-1] = 0
return Z, N
import time
import matplotlib
from matplotlib import colors
xmin, xmax, xn = -2.25, +0.75, 3000 // 2
ymin, ymax, yn = -1.25, +1.25, 2500 // 2
maxiter = 200
horizon = 2.0 ** 40
log_horizon = np.log2(np.log(horizon))
Z, N = mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon)
# Normalized recount as explained in:
# https://linas.org/art-gallery/escape/smooth.html
# https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift
# This line will generate warnings for null values but it is faster to
# process them afterwards using the nan_to_num
with np.errstate(invalid='ignore'):
M = np.nan_to_num(N + 1 - np.log2(np.log(abs(Z))) + log_horizon)
dpi = 72
width = 10
height = 10*yn/xn
fig = plt.figure(figsize=(width, height), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
# Shaded rendering
light = colors.LightSource(azdeg=315, altdeg=10)
M = light.shade(M, cmap=plt.cm.hot, vert_exag=1.5,
norm=colors.PowerNorm(0.3), blend_mode='hsv')
ax.imshow(M, extent=[xmin, xmax, ymin, ymax], interpolation="bicubic")
ax.set_xticks([])
ax.set_yticks([])
# Some advertisement for matplotlib
year = time.strftime("%Y")
text = ("The Mandelbrot fractal set\n"
"Rendered with matplotlib %s, %s - http://matplotlib.org"
% (matplotlib.__version__, year))
ax.text(xmin+.025, ymin+.025, text, color="white", fontsize=12, alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
MAT281 Aplicaciones de la Matemática en la Ingeniería Módulo 03 Clase 02: Visualización Imperativa Objetivos* Comprender el estilo de visualización imperativa.* Aplicar gráficos adecuados dependiendo de los datos. Contenidos* [Visualización Imperativa](imperative)* [Matplotlib](matplotlib)* [Gráfico a Gráfico](plot-plot) - [Gráfico de Barras](barplot) - [Gráfico de Líneas](lineplot) - [Scatter Plot](scatter-plot) - [Gráfico de Barras de Error](error-bar-plot) - [Countour Plot](countour-plot) - [Campo de Vectores](vector-field) Visualización ImperativaEste paradigma se focaliza en las instrucciones recibidas, ya que no abstrae las operaciones o codificaciones visuales. Algunas de sus características son:* Se especifica _Cómo_ se debe hacer algo.* Se deben especificar manualmente los pasos del trazado.* Especificación y ejecución entrelazadas.Coloquialmente se puede entender como que se debe decidir pixel a pixel lo que se desea mostrar. MatplotlibLo mejor para entender una librería es la explicación que entrega la misma comunidad, a continuación un extracto de la [página web](https://matplotlib.org) de matplotlib:_Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits.__Matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc., with just a few lines of code. For examples, see the sample plots and thumbnail gallery.__For simple plotting the pyplot module provides a MATLAB-like interface, particularly when combined with IPython. For the power user, you have full control of line styles, font properties, axes properties, etc, via an object oriented interface or via a set of functions familiar to MATLAB users._ Conceptos GeneralesEl trazado requiere acción en un rango de niveles, desde el más general (por ejemplo, 'contornear este arreglo 2D') hasta el más específico (por ejemplo, 'colorear un píxel de color rojo'). El propósito de esta librería es generar gráficos de manera rápida lo más fácil posible (con código de alto nivel), pero con la capacidad de usar código de bajo nivel cuando sea necesario.Por lo tanto, todo en matplotlib está organizado en una jerarquía. En la parte superior se encuentra el módulo `matplotlib.pyplot`. En este nivel, se utilizan funciones simples para agregar elementos de trazado (líneas, imágenes, texto, etc.) a los ejes actuales en la figura actual. El siguiente nivel en la jerarquía es el primer nivel de la interfaz orientada a objetos, en la que pyplot se usa solo para algunas funciones, como la creación de figuras, y el usuario crea y realiza un seguimiento explícito de los objetos de figuras y ejes. En este nivel, el usuario usa pyplot para crear figuras, y a través de esas figuras, se pueden crear uno o más objetos de ejes. Estos objetos de ejes se utilizan para la mayoría de las acciones de trazado.  FigureEs la visualización completa. _Figure_ realiza un seguimiento de todos los _Axes_ hijos y el _Canvas_. 
Una figura puede tener cualquier número de _Axes_, pero para ser útil debe tener al menos uno.La forma más fácil de crear una nueva _Figure_ es con pyplot:```pythonfig = plt.figure() an empty figure with no axesfig, ax_lst = plt.subplots(2, 2) a figure with a 2x2 grid of Axes``` AxesEsto es lo que se puede pensar como 'un gráfico', es la región de la imagen con el espacio de datos. Un _Figure_ dada puede contener muchos _Axes_, pero un objeto _Axe_ dado solo puede estar en un _Figure_. _Axes_ contiene dos (o tres en el caso de 3D) objetos _Axis_ que se ocupan de los límites de datos. Cada _Axe_ tiene un título, una etiqueta para el eje horizonal y una etiqueta para el eje vertical.La clase _Axes_ y sus funciones son el punto de entrada principal para trabajar con la interfaz orientada a objetos. AxisCorresponden a los ejes, algo así como líneas rectas. Se encargan de establecer los límites del gráfico y generar los ticks (las marcas en el eje) y los ticklabels (_strings_ que etiquetan los ticks). Gráfico a Gráfico
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
%matplotlib inline
###Output
_____no_output_____
###Markdown
Gráfico de Barras
###Code
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# Another way without axes
fig = plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.barh(y_pos, performance, xerr=error, align='center', color="g", alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance')
plt.subplot(1, 2, 2)
plt.bar(y_pos, performance, yerr=error, align='center', color="g", alpha=0.6)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.ylabel('Performance')
plt.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de barras?* x: Debe ser datos del tipo nominal o ordinal.* y: Debe ser datos de tipo ordinal, posicional o cuantitativo.Evitar: gráfico de nominal vs nominal. Gráfico de Líneas
###Code
def f(t):
return np.exp(-t) * np.cos(2 * np.pi * t)
t1 = np.arange(0.0, 5.0, 0.5)
t2 = np.arange(0.0, 5.0, 0.02)
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 10), sharex=True, sharey=True)
ax1.plot(t1, f(t1), "b")
ax1.grid()
ax1.set_title("arange step = 0.5")
ax2.plot(t2, f(t2), "b")
ax2.grid()
ax2.set_title("arange step = 0.02")
fig.show()
###Output
C:\Users\Public\Documents\Anaconda\lib\site-packages\ipykernel_launcher.py:17: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
¿Cuándo utilizar gráfico de líneas?* x: Debe ser datos del tipo ordinal o cuantitativo.* y: Debe ser datos de tipo ordinal, posicional o cuantitativo. Scatter Plot
###Code
N = 100
r0 = 0.6
x = 0.9 * np.random.rand(N)
y = 0.9 * np.random.rand(N)
area = np.pi * (10 * np.random.rand(N)) ** 2 # 0 to 10 point radiuses
c = np.sqrt(area)
r = np.sqrt(x ** 2 + y ** 2)
cm1 = plt.cm.get_cmap('RdYlBu')
cm2 = plt.cm.get_cmap('Greys')
plt.figure(figsize=(20, 12))
area1 = np.ma.masked_where(r < r0, area)
area2 = np.ma.masked_where(r >= r0, area)
sc1 = plt.scatter(x, y, s=area1, marker='^', c=c, cmap=cm1)
plt.colorbar(sc1)
sc2 = plt.scatter(x, y, s=area2, marker='o', c=c, cmap=cm2)
plt.colorbar(sc2)
# Show the boundary between the regions:
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta), "k:", lw=2.0)
plt.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar scatter plot?* x: Dato del tipo posicional o cuantitativo.* y: Dato del tipo posicional o cuantitativo. * z: Dato del tipo nominal u ordinal (opcional) ***OBSERVACION***: Si hay pocos puntos, también puede usarse para z datos de tipo posicional o cuantitativo. Gráfico de Barra de Error
###Code
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
x_error = 0.1 + 0.2*np.random.rand(len(x))
ax1.errorbar(x, y, xerr=x_error)
y_error = 0.1 + 0.2*np.random.rand(len(x))
ax2.errorbar(x, y, yerr=y_error)
fig.show()
###Output
C:\Users\Public\Documents\Anaconda\lib\site-packages\ipykernel_launcher.py:12: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
if sys.path[0] == '':
###Markdown
¿Cuándo utilizar gráfico de barra de error?* x: Dato del tipo posicional o cuantitativo.* y: Dato del tipo posicional o cuantitativo. * z: Dato del tipo posicional o cuantitativo.Los valores de z tienen que tener las mismas unidades y. Countor Plot
###Code
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
pos,
mean=[-1.0, -1.0],
cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
pos,
mean=[1.0, 1.0],
cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
fig, axs = plt.subplots(ncols=2, figsize=(20, 10), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
{"colors": "k", "linestyles": "solid"},
{"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
cs = ax.contour(x, y, z, 11, **countour_styles[i])
if i > 0:
ax.clabel(cs, fontsize=9, inline=1)
ax.grid(alpha=0.5)
fig.show()
###Output
C:\Users\Public\Documents\Anaconda\lib\site-packages\ipykernel_launcher.py:29: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
¿Cuándo se debe utiliar countour plot?* x: Dato del tipo posicional o cuantitativo.* y: Dato de tipo posicional o cuantitativo. * z: Dato de tipo posicional o cuantitativo.***OBSERVACION***: Se debe tener suficiente densidad/regularidad de puntos como para poder obtener superficies de nivel. Campos de Vectores¿Porqué se llama quiver al campo de vectores en inglés?
###Code
def my_vector_field():
"""
You can even define a new function.
"""
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
U = np.cos(X)
V = np.sin(Y)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
Q1 = ax1.quiver(U, V)
qk1 = ax1.quiverkey(
Q1,
0.5,
0.92,
2,
r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'}
)
Q2 = ax2.quiver(
X[::3, ::3],
Y[::3, ::3],
U[::3, ::3],
V[::3, ::3],
pivot='mid',
color='r',
units='inches'
)
qk2 = ax2.quiverkey(
Q2,
0.5,
0.03,
1,
r'$1 \frac{m}{s}$',
fontproperties={'weight': 'bold'}
)
ax2.plot(X[::3, ::3], Y[::3, ::3], 'k.')
ax2.set_title("pivot='mid'; every third arrow; units='inches'")
fig.show()
my_vector_field()
###Output
C:\Users\Public\Documents\Anaconda\lib\site-packages\ipykernel_launcher.py:42: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
¿Cuándo utilizar campos de vectores?* x: Debe ser datos del tipo posicional o cuantitativo.* y: Debe ser datos de tipo posicional o cuantitativo. * z: Pendiente debe ser dato de tipo posicional o cuantitativo.Evitar: gráfico de campo de vectores si no es posible la interpretación correspondiente. El límite es la imaginaciónPuedes encontrar una infinidad de ejemplos en la Galería de Maplotlib ([link](https://matplotlib.org/3.1.1/gallery/index.html)). Mapas de Calor
###Code
from mpl_heatmap import heatmap, annotate_heatmap
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
"Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
[2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
[1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
[0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
[0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
[1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
fig, ax = plt.subplots(figsize=(10, 10))
im, cbar = heatmap(harvest, vegetables, farmers, ax=ax,
cmap="YlGn", cbarlabel="harvest [t/year]")
texts = annotate_heatmap(im, valfmt="{x:.1f} t")
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Conjunto de Mandelbrot
###Code
def mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon=2.0):
X = np.linspace(xmin, xmax, xn).astype(np.float32)
Y = np.linspace(ymin, ymax, yn).astype(np.float32)
C = X + Y[:, None] * 1j
N = np.zeros_like(C, dtype=int)
Z = np.zeros_like(C)
for n in range(maxiter):
I = abs(Z) < horizon
N[I] = n
Z[I] = Z[I]**2 + C[I]
N[N == maxiter-1] = 0
return Z, N
import time
import matplotlib
from matplotlib import colors
xmin, xmax, xn = -2.25, +0.75, 3000 // 2
ymin, ymax, yn = -1.25, +1.25, 2500 // 2
maxiter = 200
horizon = 2.0 ** 40
log_horizon = np.log2(np.log(horizon))
Z, N = mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon)
# Normalized recount as explained in:
# https://linas.org/art-gallery/escape/smooth.html
# https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift
# This line will generate warnings for null values but it is faster to
# process them afterwards using the nan_to_num
with np.errstate(invalid='ignore'):
M = np.nan_to_num(N + 1 - np.log2(np.log(abs(Z))) + log_horizon)
dpi = 72
width = 10
height = 10*yn/xn
fig = plt.figure(figsize=(width, height), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
# Shaded rendering
light = colors.LightSource(azdeg=315, altdeg=10)
M = light.shade(M, cmap=plt.cm.hot, vert_exag=1.5,
norm=colors.PowerNorm(0.3), blend_mode='hsv')
ax.imshow(M, extent=[xmin, xmax, ymin, ymax], interpolation="bicubic")
ax.set_xticks([])
ax.set_yticks([])
# Some advertisement for matplotlib
year = time.strftime("%Y")
text = ("The Mandelbrot fractal set\n"
"Rendered with matplotlib %s, %s - http://matplotlib.org"
% (matplotlib.__version__, year))
ax.text(xmin+.025, ymin+.025, text, color="white", fontsize=12, alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
MAT281 Aplicaciones de la Matemática en la Ingeniería Módulo 03 Clase 02: Visualización Imperativa Objetivos* Comprender el estilo de visualización imperativa.* Aplicar gráficos adecuados dependiendo de los datos. Contenidos* [Visualización Imperativa](imperative)* [Matplotlib](matplotlib)* [Gráfico a Gráfico](plot-plot) - [Gráfico de Barras](barplot) - [Gráfico de Líneas](lineplot) - [Scatter Plot](scatter-plot) - [Gráfico de Barras de Error](error-bar-plot) - [Countour Plot](countour-plot) - [Campo de Vectores](vector-field) Visualización ImperativaEste paradigma se focaliza en las instrucciones recibidas, ya que no abstrae las operaciones o codificaciones visuales. Algunas de sus características son:* Se especifica _Cómo_ se debe hacer algo.* Se deben especificar manualmente los pasos del trazado.* Especificación y ejecución entrelazadas.Coloquialmente se puede entender como que se debe decidir pixel a pixel lo que se desea mostrar. MatplotlibLo mejor para entender una librería es la explicación que entrega la misma comunidad, a continuación un extracto de la [página web](https://matplotlib.org) de matplotlib:_Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits.__Matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc., with just a few lines of code. For examples, see the sample plots and thumbnail gallery.__For simple plotting the pyplot module provides a MATLAB-like interface, particularly when combined with IPython. For the power user, you have full control of line styles, font properties, axes properties, etc, via an object oriented interface or via a set of functions familiar to MATLAB users._ Conceptos GeneralesEl trazado requiere acción en un rango de niveles, desde el más general (por ejemplo, 'contornear este arreglo 2D') hasta el más específico (por ejemplo, 'colorear un píxel de color rojo'). El propósito de esta librería es generar gráficos de manera rápida lo más fácil posible (con código de alto nivel), pero con la capacidad de usar código de bajo nivel cuando sea necesario.Por lo tanto, todo en matplotlib está organizado en una jerarquía. En la parte superior se encuentra el módulo `matplotlib.pyplot`. En este nivel, se utilizan funciones simples para agregar elementos de trazado (líneas, imágenes, texto, etc.) a los ejes actuales en la figura actual. El siguiente nivel en la jerarquía es el primer nivel de la interfaz orientada a objetos, en la que pyplot se usa solo para algunas funciones, como la creación de figuras, y el usuario crea y realiza un seguimiento explícito de los objetos de figuras y ejes. En este nivel, el usuario usa pyplot para crear figuras, y a través de esas figuras, se pueden crear uno o más objetos de ejes. Estos objetos de ejes se utilizan para la mayoría de las acciones de trazado.  FigureEs la visualización completa. _Figure_ realiza un seguimiento de todos los _Axes_ hijos y el _Canvas_. 
Una figura puede tener cualquier número de _Axes_, pero para ser útil debe tener al menos uno.La forma más fácil de crear una nueva _Figure_ es con pyplot:```pythonfig = plt.figure() an empty figure with no axesfig, ax_lst = plt.subplots(2, 2) a figure with a 2x2 grid of Axes``` AxesEsto es lo que se puede pensar como 'un gráfico', es la región de la imagen con el espacio de datos. Un _Figure_ dada puede contener muchos _Axes_, pero un objeto _Axe_ dado solo puede estar en un _Figure_. _Axes_ contiene dos (o tres en el caso de 3D) objetos _Axis_ que se ocupan de los límites de datos. Cada _Axe_ tiene un título, una etiqueta para el eje horizonal y una etiqueta para el eje vertical.La clase _Axes_ y sus funciones son el punto de entrada principal para trabajar con la interfaz orientada a objetos. AxisCorresponden a los ejes, algo así como líneas rectas. Se encargan de establecer los límites del gráfico y generar los ticks (las marcas en el eje) y los ticklabels (_strings_ que etiquetan los ticks). Gráfico a Gráfico
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
%matplotlib inline
###Output
_____no_output_____
###Markdown
Gráfico de Barras
###Code
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# Another way without axes
fig = plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.barh(y_pos, performance, xerr=error, align='center', color="g", alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance')
plt.subplot(1, 2, 2)
plt.bar(y_pos, performance, yerr=error, align='center', color="g", alpha=0.6)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.ylabel('Performance')
plt.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de barras?* x: Debe ser datos del tipo nominal o ordinal.* y: Debe ser datos de tipo ordinal, posicional o cuantitativo.Evitar: gráfico de nominal vs nominal. Gráfico de Líneas
###Code
def f(t):
return np.exp(-t) * np.cos(2 * np.pi * t)
t1 = np.arange(0.0, 5.0, 0.5)
t2 = np.arange(0.0, 5.0, 0.02)
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 10), sharex=True, sharey=True)
ax1.plot(t1, f(t1), "b")
ax1.grid()
ax1.set_title("arange step = 0.5")
ax2.plot(t2, f(t2), "b")
ax2.grid()
ax2.set_title("arange step = 0.02")
fig.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de líneas?* x: Debe ser datos del tipo ordinal o cuantitativo.* y: Debe ser datos de tipo ordinal, posicional o cuantitativo. Scatter Plot
###Code
N = 100
r0 = 0.6
x = 0.9 * np.random.rand(N)
y = 0.9 * np.random.rand(N)
area = np.pi * (10 * np.random.rand(N)) ** 2 # 0 to 10 point radiuses
c = np.sqrt(area)
r = np.sqrt(x ** 2 + y ** 2)
cm1 = plt.cm.get_cmap('RdYlBu')
cm2 = plt.cm.get_cmap('Greys')
plt.figure(figsize=(20, 12))
area1 = np.ma.masked_where(r < r0, area)
area2 = np.ma.masked_where(r >= r0, area)
sc1 = plt.scatter(x, y, s=area1, marker='^', c=c, cmap=cm1)
plt.colorbar(sc1)
sc2 = plt.scatter(x, y, s=area2, marker='o', c=c, cmap=cm2)
plt.colorbar(sc2)
# Show the boundary between the regions:
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta), "k:", lw=2.0)
plt.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar scatter plot?* x: Dato del tipo posicional o cuantitativo.* y: Dato del tipo posicional o cuantitativo. * z: Dato del tipo nominal u ordinal (opcional) ***OBSERVACION***: Si hay pocos puntos, también puede usarse para z datos de tipo posicional o cuantitativo. Gráfico de Barra de Error
###Code
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
x_error = 0.1 + 0.2*np.random.rand(len(x))
ax1.errorbar(x, y, xerr=x_error)
y_error = 0.1 + 0.2*np.random.rand(len(x))
ax2.errorbar(x, y, yerr=y_error)
fig.show()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar gráfico de barra de error?* x: Dato del tipo posicional o cuantitativo.* y: Dato del tipo posicional o cuantitativo. * z: Dato del tipo posicional o cuantitativo.Los valores de z tienen que tener las mismas unidades y. Countor Plot
###Code
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
pos,
mean=[-1.0, -1.0],
cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
pos,
mean=[1.0, 1.0],
cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
fig, axs = plt.subplots(ncols=2, figsize=(20, 10), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
{"colors": "k", "linestyles": "solid"},
{"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
cs = ax.contour(x, y, z, 11, **countour_styles[i])
if i > 0:
ax.clabel(cs, fontsize=9, inline=1)
ax.grid(alpha=0.5)
fig.show()
###Output
_____no_output_____
###Markdown
¿Cuándo se debe utiliar countour plot?* x: Dato del tipo posicional o cuantitativo.* y: Dato de tipo posicional o cuantitativo. * z: Dato de tipo posicional o cuantitativo.***OBSERVACION***: Se debe tener suficiente densidad/regularidad de puntos como para poder obtener superficies de nivel. Campos de Vectores¿Porqué se llama quiver al campo de vectores en inglés?
###Code
def my_vector_field():
"""
You can even define a new function.
"""
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
U = np.cos(X)
V = np.sin(Y)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
Q1 = ax1.quiver(U, V)
qk1 = ax1.quiverkey(
Q1,
0.5,
0.92,
2,
r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'}
)
Q2 = ax2.quiver(
X[::3, ::3],
Y[::3, ::3],
U[::3, ::3],
V[::3, ::3],
pivot='mid',
color='r',
units='inches'
)
qk2 = ax2.quiverkey(
Q2,
0.5,
0.03,
1,
r'$1 \frac{m}{s}$',
fontproperties={'weight': 'bold'}
)
ax2.plot(X[::3, ::3], Y[::3, ::3], 'k.')
ax2.set_title("pivot='mid'; every third arrow; units='inches'")
fig.show()
my_vector_field()
###Output
_____no_output_____
###Markdown
¿Cuándo utilizar campos de vectores?* x: Debe ser datos del tipo posicional o cuantitativo.* y: Debe ser datos de tipo posicional o cuantitativo. * z: Pendiente debe ser dato de tipo posicional o cuantitativo.Evitar: gráfico de campo de vectores si no es posible la interpretación correspondiente. El límite es la imaginaciónPuedes encontrar una infinidad de ejemplos en la Galería de Maplotlib ([link](https://matplotlib.org/3.1.1/gallery/index.html)). Mapas de Calor
###Code
from mpl_heatmap import heatmap, annotate_heatmap
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
"Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
[2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
[1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
[0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
[0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
[1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
fig, ax = plt.subplots(figsize=(10, 10))
im, cbar = heatmap(harvest, vegetables, farmers, ax=ax,
cmap="YlGn", cbarlabel="harvest [t/year]")
texts = annotate_heatmap(im, valfmt="{x:.1f} t")
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Conjunto de Mandelbrot
###Code
def mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon=2.0):
X = np.linspace(xmin, xmax, xn).astype(np.float32)
Y = np.linspace(ymin, ymax, yn).astype(np.float32)
C = X + Y[:, None] * 1j
N = np.zeros_like(C, dtype=int)
Z = np.zeros_like(C)
for n in range(maxiter):
I = abs(Z) < horizon
N[I] = n
Z[I] = Z[I]**2 + C[I]
N[N == maxiter-1] = 0
return Z, N
import time
import matplotlib
from matplotlib import colors
xmin, xmax, xn = -2.25, +0.75, 3000 // 2
ymin, ymax, yn = -1.25, +1.25, 2500 // 2
maxiter = 200
horizon = 2.0 ** 40
log_horizon = np.log2(np.log(horizon))
Z, N = mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon)
# Normalized recount as explained in:
# https://linas.org/art-gallery/escape/smooth.html
# https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift
# This line will generate warnings for null values but it is faster to
# process them afterwards using the nan_to_num
with np.errstate(invalid='ignore'):
M = np.nan_to_num(N + 1 - np.log2(np.log(abs(Z))) + log_horizon)
dpi = 72
width = 10
height = 10*yn/xn
fig = plt.figure(figsize=(width, height), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
# Shaded rendering
light = colors.LightSource(azdeg=315, altdeg=10)
M = light.shade(M, cmap=plt.cm.hot, vert_exag=1.5,
norm=colors.PowerNorm(0.3), blend_mode='hsv')
ax.imshow(M, extent=[xmin, xmax, ymin, ymax], interpolation="bicubic")
ax.set_xticks([])
ax.set_yticks([])
# Some advertisement for matplotlib
year = time.strftime("%Y")
text = ("The Mandelbrot fractal set\n"
"Rendered with matplotlib %s, %s - http://matplotlib.org"
% (matplotlib.__version__, year))
ax.text(xmin+.025, ymin+.025, text, color="white", fontsize=12, alpha=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
MAT281 Aplicaciones de la Matemática en la Ingeniería Módulo 03 Clase 02: Visualización Imperativa Objetivos* Comprender el estilo de visualización imperativa.* Aplicar gráficos adecuados dependiendo de los datos. Contenidos* [Visualización Imperativa](imperative)* [Matplotlib](matplotlib)* [Gráfico a Gráfico](plot-plot) - [Gráfico de Barras](barplot) - [Gráfico de Líneas](lineplot) - [Scatter Plot](scatter-plot) - [Gráfico de Barras de Error](error-bar-plot) - [Countour Plot](countour-plot) - [Campo de Vectores](vector-field) Visualización ImperativaEste paradigma se focaliza en las instrucciones recibidas, ya que no abstrae las operaciones o codificaciones visuales. Algunas de sus características son:* Se especifica _Cómo_ se debe hacer algo.* Se deben especificar manualmente los pasos del trazado.* Especificación y ejecución entrelazadas.Coloquialmente se puede entender como que se debe decidir pixel a pixel lo que se desea mostrar. MatplotlibLo mejor para entender una librería es la explicación que entrega la misma comunidad, a continuación un extracto de la [página web](https://matplotlib.org) de matplotlib:_Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, the Jupyter notebook, web application servers, and four graphical user interface toolkits.__Matplotlib tries to make easy things easy and hard things possible. You can generate plots, histograms, power spectra, bar charts, errorcharts, scatterplots, etc., with just a few lines of code. For examples, see the sample plots and thumbnail gallery.__For simple plotting the pyplot module provides a MATLAB-like interface, particularly when combined with IPython. For the power user, you have full control of line styles, font properties, axes properties, etc, via an object oriented interface or via a set of functions familiar to MATLAB users._ Conceptos GeneralesEl trazado requiere acción en un rango de niveles, desde el más general (por ejemplo, 'contornear este arreglo 2D') hasta el más específico (por ejemplo, 'colorear un píxel de color rojo'). El propósito de esta librería es generar gráficos de manera rápida lo más fácil posible (con código de alto nivel), pero con la capacidad de usar código de bajo nivel cuando sea necesario.Por lo tanto, todo en matplotlib está organizado en una jerarquía. En la parte superior se encuentra el módulo `matplotlib.pyplot`. En este nivel, se utilizan funciones simples para agregar elementos de trazado (líneas, imágenes, texto, etc.) a los ejes actuales en la figura actual. El siguiente nivel en la jerarquía es el primer nivel de la interfaz orientada a objetos, en la que pyplot se usa solo para algunas funciones, como la creación de figuras, y el usuario crea y realiza un seguimiento explícito de los objetos de figuras y ejes. En este nivel, el usuario usa pyplot para crear figuras, y a través de esas figuras, se pueden crear uno o más objetos de ejes. Estos objetos de ejes se utilizan para la mayoría de las acciones de trazado.  FigureEs la visualización completa. _Figure_ realiza un seguimiento de todos los _Axes_ hijos y el _Canvas_. 
A figure can contain any number of _Axes_, but to be useful it should have at least one.The easiest way to create a new _Figure_ is with pyplot:```pythonfig = plt.figure() an empty figure with no axesfig, ax_lst = plt.subplots(2, 2) a figure with a 2x2 grid of Axes``` AxesThis is what you can think of as 'a plot'; it is the region of the image that holds the data space. A given _Figure_ can contain many _Axes_, but a given _Axes_ object can only belong to one _Figure_. An _Axes_ contains two (or three, in the 3D case) _Axis_ objects that handle the data limits. Each _Axes_ has a title, a label for the horizontal axis and a label for the vertical axis.The _Axes_ class and its functions are the main entry point for working with the object-oriented interface. AxisThese are the axes themselves, something like straight lines. They are in charge of setting the plot limits and generating the ticks (the marks on the axis) and the ticklabels (_strings_ that label the ticks). Plot by Plot
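To make the Figure/Axes/Axis hierarchy above concrete, here is a minimal sketch (not part of the original lecture notes) that uses only the object-oriented interface:

```python
import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 2)      # one Figure containing a 2x2 grid of Axes
ax = axs[0, 0]                     # pick one Axes ('a plot')
ax.plot([0, 1, 2], [0, 1, 4])      # most plotting actions go through the Axes
ax.set_title("One Axes")           # the Axes owns its title and labels
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_xlim(0, 2)                  # the Axis objects handle limits and ticks
ax.set_xticks([0, 1, 2])
plt.show()
```

The examples that follow mix this object-oriented style with the pyplot state-machine style.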
###Code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
%matplotlib inline
###Output
_____no_output_____
###Markdown
Bar Chart
###Code
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# Another way without axes
fig = plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.barh(y_pos, performance, xerr=error, align='center', color="g", alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance')
plt.subplot(1, 2, 2)
plt.bar(y_pos, performance, yerr=error, align='center', color="g", alpha=0.6)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.ylabel('Performance')
plt.show()
###Output
_____no_output_____
###Markdown
When should you use a bar chart?* x: Must be nominal or ordinal data.* y: Must be ordinal, positional or quantitative data.Avoid: plotting nominal vs nominal data. Line Chart
###Code
def f(t):
return np.exp(-t) * np.cos(2 * np.pi * t)
t1 = np.arange(0.0, 5.0, 0.5)
t2 = np.arange(0.0, 5.0, 0.02)
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(20, 10), sharex=True, sharey=True)
ax1.plot(t1, f(t1), "b")
ax1.grid()
ax1.set_title("arange step = 0.5")
ax2.plot(t2, f(t2), "b")
ax2.grid()
ax2.set_title("arange step = 0.02")
fig.show()
###Output
/home/daniela/miniconda3/envs/mat281/lib/python3.7/site-packages/ipykernel_launcher.py:17: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
When should you use a line chart?* x: Must be ordinal or quantitative data.* y: Must be ordinal, positional or quantitative data. Scatter Plot
###Code
N = 100
r0 = 0.6
x = 0.9 * np.random.rand(N)
y = 0.9 * np.random.rand(N)
area = np.pi * (10 * np.random.rand(N)) ** 2 # 0 to 10 point radiuses
c = np.sqrt(area)
r = np.sqrt(x ** 2 + y ** 2)
cm1 = plt.cm.get_cmap('RdYlBu')
cm2 = plt.cm.get_cmap('Greys')
plt.figure(figsize=(20, 12))
area1 = np.ma.masked_where(r < r0, area)
area2 = np.ma.masked_where(r >= r0, area)
sc1 = plt.scatter(x, y, s=area1, marker='^', c=c, cmap=cm1)
plt.colorbar(sc1)
sc2 = plt.scatter(x, y, s=area2, marker='o', c=c, cmap=cm2)
plt.colorbar(sc2)
# Show the boundary between the regions:
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta), "k:", lw=2.0)
plt.show()
###Output
_____no_output_____
###Markdown
When should you use a scatter plot?* x: Positional or quantitative data.* y: Positional or quantitative data. * z: Nominal or ordinal data (optional) ***NOTE***: If there are only a few points, z can also be positional or quantitative data. Error Bar Plot
###Code
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
x_error = 0.1 + 0.2*np.random.rand(len(x))
ax1.errorbar(x, y, xerr=x_error)
y_error = 0.1 + 0.2*np.random.rand(len(x))
ax2.errorbar(x, y, yerr=y_error)
fig.show()
###Output
/home/daniela/miniconda3/envs/mat281/lib/python3.7/site-packages/ipykernel_launcher.py:12: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
if sys.path[0] == '':
###Markdown
When should you use an error bar plot?* x: Positional or quantitative data.* y: Positional or quantitative data. * z: Positional or quantitative data.The z values must be in the same units as y. Contour Plot
###Code
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
pos,
mean=[-1.0, -1.0],
cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
pos,
mean=[1.0, 1.0],
cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
fig, axs = plt.subplots(ncols=2, figsize=(20, 10), sharex=True, sharey=True)
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
{"colors": "k", "linestyles": "solid"},
{"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
cs = ax.contour(x, y, z, 11, **countour_styles[i])
if i > 0:
ax.clabel(cs, fontsize=9, inline=1)
ax.grid(alpha=0.5)
fig.show()
###Output
/home/daniela/miniconda3/envs/mat281/lib/python3.7/site-packages/ipykernel_launcher.py:29: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
When should you use a contour plot?* x: Positional or quantitative data.* y: Positional or quantitative data. * z: Positional or quantitative data.***NOTE***: The points must be dense/regular enough to be able to recover level surfaces. Vector FieldsWhy is a vector field plot called a quiver in English?
###Code
def my_vector_field():
"""
You can even define a new function.
"""
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
U = np.cos(X)
V = np.sin(Y)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
Q1 = ax1.quiver(U, V)
qk1 = ax1.quiverkey(
Q1,
0.5,
0.92,
2,
r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'}
)
Q2 = ax2.quiver(
X[::3, ::3],
Y[::3, ::3],
U[::3, ::3],
V[::3, ::3],
pivot='mid',
color='r',
units='inches'
)
qk2 = ax2.quiverkey(
Q2,
0.5,
0.03,
1,
r'$1 \frac{m}{s}$',
fontproperties={'weight': 'bold'}
)
ax2.plot(X[::3, ::3], Y[::3, ::3], 'k.')
ax2.set_title("pivot='mid'; every third arrow; units='inches'")
fig.show()
my_vector_field()
###Output
/home/daniela/miniconda3/envs/mat281/lib/python3.7/site-packages/ipykernel_launcher.py:42: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.
###Markdown
When should you use a vector field?* x: Must be positional or quantitative data.* y: Must be positional or quantitative data. * z: The slope must be positional or quantitative data.Avoid: vector field plots when the corresponding interpretation is not possible. The limit is your imaginationYou can find countless examples in the Matplotlib Gallery ([link](https://matplotlib.org/3.1.1/gallery/index.html)). Heat Maps
###Code
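# NOTE: `mpl_heatmap` is assumed to be a local helper module providing the
# heatmap / annotate_heatmap functions from Matplotlib's annotated-heatmap
# gallery example; it is not defined elsewhere in this notebook.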
from mpl_heatmap import heatmap, annotate_heatmap
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
"potato", "wheat", "barley"]
farmers = ["Farmer Joe", "Upland Bros.", "Smith Gardening",
"Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
[2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
[1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
[0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
[0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
[1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
[0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
fig, ax = plt.subplots(figsize=(10, 10))
im, cbar = heatmap(harvest, vegetables, farmers, ax=ax,
cmap="YlGn", cbarlabel="harvest [t/year]")
texts = annotate_heatmap(im, valfmt="{x:.1f} t")
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Mandelbrot Set
###Code
def mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon=2.0):
X = np.linspace(xmin, xmax, xn).astype(np.float32)
Y = np.linspace(ymin, ymax, yn).astype(np.float32)
C = X + Y[:, None] * 1j
N = np.zeros_like(C, dtype=int)
Z = np.zeros_like(C)
for n in range(maxiter):
I = abs(Z) < horizon
N[I] = n
Z[I] = Z[I]**2 + C[I]
N[N == maxiter-1] = 0
return Z, N
import time
import matplotlib
from matplotlib import colors
xmin, xmax, xn = -2.25, +0.75, 3000 // 2
ymin, ymax, yn = -1.25, +1.25, 2500 // 2
maxiter = 200
horizon = 2.0 ** 40
log_horizon = np.log2(np.log(horizon))
Z, N = mandelbrot_set(xmin, xmax, ymin, ymax, xn, yn, maxiter, horizon)
# Normalized recount as explained in:
# https://linas.org/art-gallery/escape/smooth.html
# https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift
# This line will generate warnings for null values but it is faster to
# process them afterwards using the nan_to_num
with np.errstate(invalid='ignore'):
M = np.nan_to_num(N + 1 - np.log2(np.log(abs(Z))) + log_horizon)
dpi = 72
width = 10
height = 10*yn/xn
fig = plt.figure(figsize=(width, height), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)
# Shaded rendering
light = colors.LightSource(azdeg=315, altdeg=10)
M = light.shade(M, cmap=plt.cm.hot, vert_exag=1.5,
norm=colors.PowerNorm(0.3), blend_mode='hsv')
ax.imshow(M, extent=[xmin, xmax, ymin, ymax], interpolation="bicubic")
ax.set_xticks([])
ax.set_yticks([])
# Some advertisement for matplotlib
year = time.strftime("%Y")
text = ("The Mandelbrot fractal set\n"
"Rendered with matplotlib %s, %s - http://matplotlib.org"
% (matplotlib.__version__, year))
ax.text(xmin+.025, ymin+.025, text, color="white", fontsize=12, alpha=0.5)
plt.show()
###Output
_____no_output_____ |
Python Code Challenges/Fibonacci_sequence_generator.ipynb | ###Markdown
Resources Python[Python 3 Documentation](https://docs.python.org/3/library/) General[Stackoverflow](https://stackoverflow.com/) YouTube vids[Kalle Hallden](https://www.youtube.com/channel/UCWr0mx597DnSGLFk1WfvSkQ)[PyCon 2019](https://www.youtube.com/channel/UCxs2IIVXaEHHA4BtTiWZ2mQ)[Tech With Tim](https://www.youtube.com/channel/UC4JX40jDee_tINbkjycV4Sg)[Python Programmer](https://www.youtube.com/user/consumerchampion)[sentdex](https://www.youtube.com/user/sentdex) Markdown links[Markdown Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)[Markdown Guide](https://www.markdownguide.org/)[Markdown Table Generator](https://www.tablesgenerator.com/markdown_tables) Code```df.reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill='')``````df = pd.DataFrame(np.random.randint(500,4000,size=(200, 1)), columns=list('A'))``````df['randNumCol'] = np.random.randint(1, 6, df.shape[0])`````` Declare a list that is to be converted into a column tradcounthvac = xyzhvac.x.count()tradehvac = tradcounthvac * ['hvac'] tradcountelec = xyzelec.x.count()tradeelec = tradcountelec * ['elec'] Using 'Trade' as the column name and equating it to the list xyzhvac['Trade'] = tradehvacxyzelec['Trade'] = tradeelec``` Packages```! pip install pandas-profiling``````! pip install plotly``````! pip install cufflinks``````! pip install plotly==4.2.1``````!pip install dovpanda``````import numpy as np``````import pandas as pd``````import pandas_profiling``````import plotly.graph_objects as go``````import dovpanda``` pandas[What can you do with the new ‘Pandas’?](https://towardsdatascience.com/what-can-you-do-with-the-new-pandas-2d24cf8d8b4b)[Reordering Pandas DataFrame Columns: Thumbs Down On Standard Solutions](https://towardsdatascience.com/reordering-pandas-dataframe-columns-thumbs-down-on-standard-solutions-1ff0bc2941d5)[pandas Documentation](https://pandas.pydata.org/pandas-docs/stable/)[7 practical pandas tips when you start working with the library](https://towardsdatascience.com/7-practical-pandas-tips-when-you-start-working-with-the-library-e4a9205eb443)[dataframe transpose](https://www.geeksforgeeks.org/python-pandas-dataframe-transpose/)[Combining DataFrames with Pandas](https://datacarpentry.org/python-ecology-lesson/05-merging-data/)[dovpanda](https://github.com/dovpanda-dev/dovpanda)[Selecting Subsets of Data in Pandas: Part 1](https://medium.com/dunder-data/selecting-subsets-of-data-in-pandas-6fcd0170be9c)[10 simple Python tips to speed up your data analysis](https://thenextweb.com/syndication/2020/10/12/10-simple-python-tips-to-speed-up-your-data-analysis/)[15 Tips and Tricks to use in Jupyter Notebooks](https://towardsdatascience.com/15-tips-and-tricks-to-use-jupyter-notebook-more-efficiently-ef05ede4e4b9)```result = df.transpose() ```5 functions to examine your data: ```df.head()', df.describe(), df.info(), df.shape, df.sum(), df['Trade'].value_counts() ```Reports: ```pandas_profiling.ProfileReport(df)```Import: ```import pandas_profiling```Save a dataframe to a csv```df.to_csv```Create a Pandas Dataframe```df = pd.DataFrame(data) ```Read a csv file```pd.read_csv('')```Read a excel file```pd.read_excel('')```All rows that have a sepal length greater than 6 are dangerous ```df['is_dangerous'] = np.where(df['sepal length (cm)']>6, 'yes', 'no')```Max columns option ```pd.set_option('display.max_columns', 500)```Max row option ```pd.set_option('display.max_rows', 500)```to see columns ```df.columns```replace strings ```df.columns = df.columns.str.replace(' \(cm\)', 
'').str.replace(' ', '_')``` plotly[plotly Graphing Libraries](https://plot.ly/python/)[Different Colors for Bars in Barchart by their Value](https://community.plot.ly/t/different-colors-for-bars-in-barchart-by-their-value/6527) Scikit-Learn[A beginner’s guide to Linear Regression in Python with Scikit-Learn](https://towardsdatascience.com/a-beginners-guide-to-linear-regression-in-python-with-scikit-learn-83a8f7ae2b4f) Notes
###Code
# LinkedIn Learning
# https://www.linkedin.com/learning/learning-python-generators/challenge-fibonacci-sequence-generator?contextUrn=urn%3Ali%3AlyndaLearningPath%3A56db2f4b3dd5596be4e4989f&u=80827442
# Fibonacci Sequence Generator
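# Note: this version stores the whole sequence in a list, although only the
# last two values are ever needed; the LinkedIn solution below keeps just those two.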
def fib_generator():
l = [0]
while True:
if len(l) == 1:
l.append(1)
yield 1
else:
n = l[-1] + l[-2]
l.append(n)
yield n
# Linkedin Solution
def fib_generator2():
trailing, lead = 0,1
while True:
yield lead
trailing, lead = lead, trailing+lead
fib = fib_generator()
fib2 = fib_generator2()
fib.__next__()
fib2.__next__()
###Output
_____no_output_____ |
notebooks/misc/jax_intro.ipynb | ###Markdown
Yet another JAX tutorial Kevin Murphy ([email protected]).Last update: September 2021.[JAX](https://github.com/google/jax) is a version of NumPy that runs fast on CPU, GPU and TPU, by compiling the computational graph to XLA (Accelerated Linear Algebra). It also has an excellent automatic differentiation library, extending the earlier [autograd](https://github.com/hips/autograd) package. This library makes it easy to compute higher order derivatives, gradients of complex functions (e.g., differentiating through an iterative solver), etc.The JAX interface is almost identical to NumPy (by design), but with some small differences, and lots of additional features.We give a brief introduction below. For more details, see [this list of JAX tutorials](https://github.com/probml/probml-notebooks/blob/main/markdown/jax_tutorials.md) Setup
###Code
# Standard Python libraries
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import partial
import os
import time
import numpy as np
np.set_printoptions(precision=3)
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
from typing import Tuple, NamedTuple
from IPython import display
%matplotlib inline
import sklearn
import jax
import jax.numpy as jnp
from jax import random, vmap, jit, grad, value_and_grad, hessian, jacfwd, jacrev
print("jax version {}".format(jax.__version__))
# Check the jax backend
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
key = random.PRNGKey(0)
###Output
jax version 0.2.19
jax backend gpu
###Markdown
Hardware acceleratorsColab makes it easy to use GPUs and TPUs for speeding up some workflows, especially related to deep learning. GPUsColab offers graphics processing units (GPUs) which can be much faster than CPUs (central processing units), as we illustrate below.
###Code
# Check if GPU is available and its model, memory ...etc.
!nvidia-smi
# Check if JAX is using GPU
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
# Check the devices available for JAX
jax.devices()
###Output
jax backend gpu
###Markdown
Let's see how JAX can speed up things like matrix-matrix multiplication.First the numpy/CPU version.
###Code
# Parameters for the experiment
size = int(1e3)
number_of_loops = int(1e2)
# Standard numpy CPU
def f(x=None):
if not isinstance(x, np.ndarray):
x = np.ones((size, size), dtype=np.float32)
return np.dot(x, x.T)
%timeit -o -n $number_of_loops f()
res = _ # get result of last cell
time_cpu = res.best
print(time_cpu)
###Output
0.016404767970000192
###Markdown
Now we look at the JAX version. JAX supports execution on [XLA](https://www.tensorflow.org/xla) devices, which can be CPU, GPU or even TPU. We call `block_until_ready()` on the result when timing it, because JAX uses [asynchronous execution](https://jax.readthedocs.io/en/latest/async_dispatch.html) by default.
###Code
# JAX device execution
# https://github.com/google/jax/issues/1598
def jf(x=None):
if not isinstance(x, jnp.ndarray):
x = jnp.ones((size, size), dtype=jnp.float32)
return jnp.dot(x, x.T)
f_gpu = jit(jf, backend="gpu")
f_cpu = jit(jf, backend="cpu")
# Time the CPU version
%timeit -o -n $number_of_loops f_cpu()
res = _
time_jcpu = res.best
print(time_jcpu)
# Time the GPU version
%timeit -o -n $number_of_loops f_gpu().block_until_ready()
res = _
time_jgpu = res.best
print(time_jgpu)
print("JAX CPU time {:0.6f}, Numpy CPU time {:0.6f}, speedup {:0.6f}".format(time_jcpu, time_cpu, time_cpu / time_jcpu))
print("JAX GPU time {:0.6f}, Numpy CPU time {:0.6f}, speedup {:0.6f}".format(time_jgpu, time_cpu, time_cpu / time_jgpu))
###Output
JAX CPU time 0.025696, Numpy CPU time 0.016405, speedup 0.638425
JAX GPU time 0.000485, Numpy CPU time 0.016405, speedup 33.795971
###Markdown
In the above example we see that JAX GPU is much faster than Numpy CPU.However we also see that JAX CPU is slower than Numpy CPU - this can happen with simple functions, but usually JAX provides a speedup, even on CPU, if you JIT compile a complex function (see below). We can move numpy arrays to the GPU for speed. The result will be transferred back to CPU for printing, saving, etc.
###Code
from jax import device_put
x = np.ones((size, size)).astype(np.float32)
print(type(x))
%timeit -o -n $number_of_loops f(x)
x = device_put(x)
print(type(x))
%timeit -o -n $number_of_loops jf(x)
###Output
<class 'numpy.ndarray'>
100 loops, best of 5: 14.8 ms per loop
<class 'jaxlib.xla_extension.DeviceArray'>
100 loops, best of 5: 452 µs per loop
###Markdown
TPUsWe can turn on a tensor processing unit by selecting a TPU runtime in Colab (Runtime > Change runtime type).Everything else "just works" as before.
###Code
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
###Output
_____no_output_____
###Markdown
If everything is set up correctly, the following command should return a list of 8 TPU devices.
###Code
jax.local_devices()
###Output
_____no_output_____
###Markdown
Vmap We often write a function to process a single vector or matrix, and then want to apply it to a batch of data. Using for loops is slow, and manually batchifying code is complex. Fortunately we can use the `vmap` function, which will map our function across a set of inputs, automatically batchifying it. Example: 1d convolution(This example is from the Deepmind tutorial.)Consider standard 1d convolution of two vectors.
###Code
x = jnp.arange(5)
w = jnp.array([2.0, 3.0, 4.0])
def convolve(x, w):
output = []
for i in range(1, len(x) - 1):
output.append(jnp.dot(x[i - 1 : i + 2], w))
return jnp.array(output)
convolve(x, w)
###Output
_____no_output_____
###Markdown
Now suppose we want to convolve multiple vectors with multiple kernels. The simplest way is to use a for loop, but this is slow.
###Code
xs = jnp.stack([x, x])
ws = jnp.stack([w, w])
def manually_batched_convolve(xs, ws):
output = []
for i in range(xs.shape[0]):
output.append(convolve(xs[i], ws[i]))
return jnp.stack(output)
manually_batched_convolve(xs, ws)
###Output
_____no_output_____
###Markdown
We can manually vectorize the code, but it is complex.
###Code
def manually_vectorised_convolve(xs, ws):
output = []
for i in range(1, xs.shape[-1] - 1):
output.append(jnp.sum(xs[:, i - 1 : i + 2] * ws, axis=1))
return jnp.stack(output, axis=1)
manually_vectorised_convolve(xs, ws)
###Output
_____no_output_____
###Markdown
Fortunately vmap can do this for us!
###Code
auto_batch_convolve = jax.vmap(convolve)
auto_batch_convolve(xs, ws)
###Output
_____no_output_____
###Markdown
AxesBy default, vmap vectorizes over the first axis of each of its inputs. If the first argument has a batch dimension and the second does not, specify `in_axes=[0, None]`, so the second argument is not vectorized over.
###Code
jax.vmap(convolve, in_axes=[0, None])(xs, w)
###Output
_____no_output_____
###Markdown
We can also vectorize over other dimensions.
###Code
print(xs.shape)
xst = jnp.transpose(xs)
print(xst.shape)
wst = jnp.transpose(ws)
auto_batch_convolve_v2 = jax.vmap(convolve, in_axes=1, out_axes=1)
auto_batch_convolve_v2(xst, wst)
###Output
(2, 5)
(5, 2)
###Markdown
Example: logistic regressionWe now give another example, using binary logistic regression.Let us start with a predictor for a single example.
###Code
D = 2
N = 3
w = np.random.normal(size=(D,))
X = np.random.normal(size=(N, D))
def sigmoid(x):
return 0.5 * (jnp.tanh(x / 2.0) + 1)
def predict_single(x):
return sigmoid(jnp.dot(w, x)) # <(D) , (D)> = (1) # inner product
print(predict_single(X[0, :])) # works
print(predict_single(X)) # fails
###Output
_____no_output_____
###Markdown
We can manually vectorize the code by remembering the shapes, so $X w$ multiplies each row of $X$ with $w$.
###Code
def predict_batch(X):
return sigmoid(jnp.dot(X, w)) # (N,D) * (D,1) = (N,1) # matrix-vector multiply
print(predict_batch(X))
###Output
[0.232 0.121 0.878]
###Markdown
But it is easier to use vmap.
###Code
print(vmap(predict_single)(X))
###Output
[0.232 0.121 0.878]
###Markdown
Failure casesVmap requires that the shapes of all the variables created by the function being mapped are the same for all values of the input arguments, as explained [here](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html). So vmap cannot be used for arbitrary embarrassingly parallel tasks, only for those where every call produces arrays of the same shape. Below we give a simple example where this fails, since internally we create a vector whose length depends on the input 'length'.
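One common workaround, shown here as a minimal sketch (not from the original notebook, and assuming an upper bound on `length` is known in advance), is to compute over a fixed-size buffer and mask out the unused entries, so that every call produces arrays of the same shape:

```python
def example_fun_masked(length, val=4, max_length=16):
    # The buffer shape is fixed, so it no longer depends on `length`.
    mask = jnp.arange(max_length) < length
    return jnp.sum(jnp.where(mask, val, 0.0))

xs = jnp.arange(1, 10)
print(vmap(example_fun_masked)(xs))  # [ 4.  8. 12. ... 36.]
```

The original, shape-dependent version is shown next.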
###Code
def example_fun(length, val=4):
return jnp.sum(jnp.ones((length,)) * val)
xs = jnp.arange(1, 10)
# Python map works fine
v = list(map(example_fun, xs))
print(v)
###Output
[DeviceArray(4., dtype=float32), DeviceArray(8., dtype=float32), DeviceArray(12., dtype=float32), DeviceArray(16., dtype=float32), DeviceArray(20., dtype=float32), DeviceArray(24., dtype=float32), DeviceArray(28., dtype=float32), DeviceArray(32., dtype=float32), DeviceArray(36., dtype=float32)]
###Markdown
The following fails.
###Code
v = vmap(example_fun)(xs)
print(v)
###Output
_____no_output_____
###Markdown
StochasticsJAX is designed to be deterministic, but in some cases we want to introduce randomness in a controlled way, and to reason about it. We discuss this below. Random number generationOne of the biggest differences from NumPy is the way JAX treats pseudo random number generation (PRNG).This is because JAX does not maintain any global state, i.e., it is purely functional.This design "provides reproducible results invariant to compilation boundaries and backends,while also maximizing performance by enabling vectorized generation and parallelization across random calls"(to quote [the official page](https://github.com/google/jax#a-brief-tour)).For example, consider this NumPy snippet. Each call to np.random.uniform updates the global state. The value of foo() is therefore only guaranteed to give the same result every time if we evaluate bar() and baz() in the same order (e.g. left to right). This is why foo1 and foo2 give different answers even though mathematically they shouldn't (we cannot just substitute in the value of a variable and derive the result, so we are violating "referential transparency").
###Code
import numpy as np
def bar():
return np.random.uniform(size=(3))
def baz():
return np.random.uniform(size=(3))
def foo(seed):
np.random.seed(seed)
return bar() + 2 * baz()
def foo1(seed):
np.random.seed(seed)
a = bar()
b = 2 * baz()
return a + b
def foo2(seed):
np.random.seed(seed)
a = 2 * baz()
b = bar()
return a + b
seed = 0
print(foo(seed))
print(foo1(seed))
print(foo2(seed))
###Output
[1.639 1.562 1.895]
[1.639 1.562 1.895]
[1.643 1.854 1.851]
###Markdown
Jax may evaluate parts of expressions such as `bar() + baz()` in parallel, which would violate reproducibility. To prevent this, the user must pass in an explicit PRNG key to every function that requires a source of randomness. Using the same key will give the same results. See the example below.
###Code
key = random.PRNGKey(0)
print(random.normal(key, shape=(3,))) # [ 1.81608593 -0.48262325 0.33988902]
print(random.normal(key, shape=(3,))) # [ 1.81608593 -0.48262325 0.33988902] ## identical results
###Output
[ 1.816 -0.483 0.34 ]
[ 1.816 -0.483 0.34 ]
###Markdown
When generating independent samples, it is important to use different keys, to ensure results are not correlated. We can do this by *splitting* the key into the 'master' key (which will be used in later parts of the code via further splitting), and the 'subkey', which is used temporarily to generate randomness and then thrown away, as we illustrate below.
###Code
# To make a new key, we split the current key into two pieces.
key, subkey = random.split(key)
print(random.normal(subkey, shape=(3,))) # [ 1.1378783 -1.22095478 -0.59153646]
# We can continue to split off new pieces from the global key.
key, subkey = random.split(key)
print(random.normal(subkey, shape=(3,))) # [-0.06607265 0.16676566 1.17800343]
###Output
[ 1.138 -1.221 -0.592]
[-0.066 0.167 1.178]
###Markdown
We now reimplement the numpy example in JAX and show that we get the same result no matter the order of evaluation of bar and baz.
###Code
def bar(key):
return jax.random.uniform(key, shape=(3,))
def baz(key):
return jax.random.uniform(key, shape=(3,))
def foo(key):
subkey1, subkey2 = random.split(key, num=2)
return bar(subkey1) + 2 * baz(subkey2)
def foo1(key):
subkey1, subkey2 = random.split(key, num=2)
a = bar(subkey1)
b = 2 * baz(subkey2)
return a + b
def foo2(key):
subkey1, subkey2 = random.split(key, num=2)
a = 2 * baz(subkey2)
b = bar(subkey1)
return a + b
key = random.PRNGKey(0)
key, subkey = random.split(key)
print(foo(subkey))
print(foo1(subkey))
print(foo2(subkey))
###Output
[2.079 2.002 1.089]
[2.079 2.002 1.089]
[2.079 2.002 1.089]
###Markdown
In JAX (but not in NumPy), a random draw of N samples all at once will not give the same results as N individual draws, as we show below.
###Code
key = random.PRNGKey(42)
subkeys = random.split(key, 3)
sequence = np.stack([jax.random.normal(subkey) for subkey in subkeys])
print("individually:", sequence)
key = random.PRNGKey(42)
print("all at once: ", jax.random.normal(key, shape=(3,)))
np.random.seed(0)
sequence = np.stack([np.random.normal() for i in range(3)])
print("individually:", sequence)
np.random.seed(0)
print("all at once: ", np.random.normal(size=(3,)))
###Output
individually: [1.764 0.4 0.979]
all at once: [1.764 0.4 0.979]
###Markdown
Probability distributionsThe [distrax library](https://github.com/deepmind/distrax) is a JAX-native implementation of some parts of the distributions library from [TensorFlow Probability (TFP)](https://www.tensorflow.org/probability). The main advantage is that the distrax source code is much easier to read and understand. For distributions not in distrax, it is possible to use TFP instead. Here is a brief example.
###Code
%%capture
!pip install git+git://github.com/deepmind/distrax.git
import distrax
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
key = jax.random.PRNGKey(1234)
mu = jnp.array([-1.0, 0.0, 1.0])
sigma = jnp.array([0.1, 0.2, 0.3])
dist_distrax = distrax.MultivariateNormalDiag(mu, sigma)
dist_tfp = tfd.MultivariateNormalDiag(mu, sigma)
samples = dist_distrax.sample(seed=key)
# Both print 1.775
print(dist_distrax.log_prob(samples))
print(dist_tfp.log_prob(samples))
###Output
1.7750063
1.7750063
###Markdown
Autograd In this section, we illustrate automatic differentiation using JAX.For details, see [this video](https://www.youtube.com/watch?v=wG_nF1awSSY&t=697s) or [The Autodiff Cookbook](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html). DerivativesWe can compute $(\nabla f)(x)$ using `grad(f)(x)`. For example, consider$f(x) = x^3 + 2x^2 - 3x + 1$$f'(x) = 3x^2 + 4x -3$$f''(x) = 6x + 4$$f'''(x) = 6$$f^{iv}(x) = 0$
###Code
f = lambda x: x**3 + 2 * x**2 - 3 * x + 1
dfdx = jax.grad(f)
d2fdx = jax.grad(dfdx)
d3fdx = jax.grad(d2fdx)
d4fdx = jax.grad(d3fdx)
print(dfdx(1.0))
print(d2fdx(1.0))
print(d3fdx(1.0))
print(d4fdx(1.0))
###Output
4.0
10.0
6.0
0.0
###Markdown
Partial derivatives$$\begin{align}f(x,y) &= x^2 + y \\\frac{\partial f}{\partial x} &= 2x \\\frac{\partial f}{\partial y} &= 1 \end{align}$$
###Code
def f(x, y):
return x**2 + y
# Partial derviatives
x = 2.0
y = 3.0
v, gx = value_and_grad(f, argnums=0)(x, y)
print(v)
print(gx)
gy = grad(f, argnums=1)(x, y)
print(gy)
###Output
7.0
4.0
1.0
###Markdown
Gradients Linear function: multi-input, scalar output.$$\begin{align}f(x; a) &= a^T x\\\nabla_x f(x;a) &= a\end{align}$$
###Code
def fun1d(x):
return jnp.dot(a, x)[0]
Din = 3
Dout = 1
a = np.random.normal(size=(Dout, Din))
x = np.random.normal(size=(Din,))
g = grad(fun1d)(x)
assert np.allclose(g, a)
# It is often useful to get the function value and gradient at the same time
val_grad_fn = jax.value_and_grad(fun1d)
v, g = val_grad_fn(x)
print(v)
print(g)
assert np.allclose(v, fun1d(x))
assert np.allclose(a, g)
###Output
1.9472518
[ 2.241 1.868 -0.977]
###Markdown
Linear function: multi-input, multi-output.$$\begin{align}f(x;A) &= A x \\\frac{\partial f(x;A)}{\partial x} &= A\end{align}$$
###Code
# We construct a multi-output linear function.
# We check forward and reverse mode give same Jacobians.
def fun(x):
return jnp.dot(A, x)
Din = 3
Dout = 4
A = np.random.normal(size=(Dout, Din))
x = np.random.normal(size=(Din,))
Jf = jacfwd(fun)(x)
Jr = jacrev(fun)(x)
assert np.allclose(Jf, Jr)
assert np.allclose(Jf, A)
###Output
_____no_output_____
###Markdown
Quadratic form.$$\begin{align}f(x;A) &= x^T A x \\\nabla_x f(x;A) &= (A+A^T) x\end{align}$$
###Code
D = 4
A = np.random.normal(size=(D, D))
x = np.random.normal(size=(D,))
quadfun = lambda x: jnp.dot(x, jnp.dot(A, x))
g = grad(quadfun)(x)
assert np.allclose(g, jnp.dot(A + A.T, x))
###Output
_____no_output_____
###Markdown
Chain rule applied to sigmoid function.$$\begin{align}\mu(x;w) &=\sigma(w^T x) \\\nabla_w \mu(x;w) &= \sigma'(w^T x) x \\\sigma'(a) &= \sigma(a) * (1-\sigma(a)) \end{align}$$
###Code
D = 4
w = np.random.normal(size=(D,))
x = np.random.normal(size=(D,))
y = 0
def sigmoid(x):
return 0.5 * (jnp.tanh(x / 2.0) + 1)
def mu(w):
return sigmoid(jnp.dot(w, x))
def deriv_mu(w):
return mu(w) * (1 - mu(w)) * x
deriv_mu_jax = grad(mu)
print(deriv_mu(w))
print(deriv_mu_jax(w))
assert np.allclose(deriv_mu(w), deriv_mu_jax(w), atol=1e-3)
###Output
[-0.13 -0.017 -0.072 0.031]
[-0.13 -0.017 -0.072 0.031]
###Markdown
Auxiliary return valuesA function can return its value and other auxiliary results; the latter are not differentiated.
###Code
def f(x, y):
return x**2 + y, 42
x = 2.0
y = 3.0
(v, aux), g = value_and_grad(f, has_aux=True)(x, y)
print(v)
print(aux)
print(g)
###Output
7.0
42
4.0
###Markdown
JacobiansExample: Linear function: multi-input, multi-output.$$\begin{align}f(x;A) &= A x \\\frac{\partial f(x;A)}{\partial x} &= A\end{align}$$
###Code
# We construct a multi-output linear function.
# We check forward and reverse mode give same Jacobians.
def fun(x):
return jnp.dot(A, x)
Din = 3
Dout = 4
A = np.random.normal(size=(Dout, Din))
x = np.random.normal(size=(Din,))
Jf = jacfwd(fun)(x)
Jr = jacrev(fun)(x)
assert np.allclose(Jf, Jr)
###Output
_____no_output_____
###Markdown
HessiansQuadratic form.$$\begin{align}f(x;A) &= x^T A x \\\nabla_x^2 f(x;A) &= A + A^T\end{align}$$
###Code
D = 4
A = np.random.normal(size=(D, D))
x = np.random.normal(size=(D,))
quadfun = lambda x: jnp.dot(x, jnp.dot(A, x))
H1 = hessian(quadfun)(x)
assert np.allclose(H1, A + A.T)
def my_hessian(fun):
return jacfwd(jacrev(fun))
H2 = my_hessian(quadfun)(x)
assert np.allclose(H1, H2)
###Output
_____no_output_____
###Markdown
Example: Binary logistic regression
###Code
def sigmoid(x):
return 0.5 * (jnp.tanh(x / 2.0) + 1)
def predict_single(w, x):
return sigmoid(jnp.dot(w, x)) # <(D) , (D)> = (1) # inner product
def predict_batch(w, X):
return sigmoid(jnp.dot(X, w)) # (N,D) * (D,1) = (N,1) # matrix-vector multiply
# negative log likelihood
def loss(weights, inputs, targets):
preds = predict_batch(weights, inputs)
logprobs = jnp.log(preds) * targets + jnp.log(1 - preds) * (1 - targets)
return -jnp.sum(logprobs)
D = 2
N = 3
w = jax.random.normal(key, shape=(D,))
X = jax.random.normal(key, shape=(N, D))
y = jax.random.choice(key, 2, shape=(N,)) # uniform binary labels
# logits = jnp.dot(X, w)
# y = jax.random.categorical(key, logits)
print(loss(w, X, y))
# Gradient function
grad_fun = grad(loss)
# Gradient of each example in the batch - 2 different ways
grad_fun_w = partial(grad_fun, w)
grads = vmap(grad_fun_w)(X, y)
print(grads)
assert grads.shape == (N, D)
grads2 = vmap(grad_fun, in_axes=(None, 0, 0))(w, X, y)
assert np.allclose(grads, grads2)
# Gradient for entire batch
grad_sum = jnp.sum(grads, axis=0)
assert grad_sum.shape == (D,)
print(grad_sum)
# Textbook implementation of gradient
def NLL_grad(weights, batch):
X, y = batch
N = X.shape[0]
mu = predict_batch(weights, X)
g = jnp.sum(jnp.dot(jnp.diag(mu - y), X), axis=0)
return g
grad_sum_batch = NLL_grad(w, (X, y))
print(grad_sum_batch)
assert np.allclose(grad_sum, grad_sum_batch)
# We can also compute Hessians, as we illustrate below.
hessian_fun = hessian(loss)
# Hessian on one example
H0 = hessian_fun(w, X[0, :], y[0])
print("Hessian(example 0)\n{}".format(H0))
# Hessian for batch
Hbatch = vmap(hessian_fun, in_axes=(None, 0, 0))(w, X, y)
print("Hbatch shape {}".format(Hbatch.shape))
Hbatch_sum = jnp.sum(Hbatch, axis=0)
print("Hbatch sum\n {}".format(Hbatch_sum))
# Textbook implementation of Hessian
def NLL_hessian(weights, batch):
X, y = batch
mu = predict_batch(weights, X)
S = jnp.diag(mu * (1 - mu))
H = jnp.dot(jnp.dot(X.T, S), X)
return H
H2 = NLL_hessian(w, (X, y))
assert np.allclose(Hbatch_sum, H2, atol=1e-2)
###Output
_____no_output_____
###Markdown
Vector Jacobian Products (VJP) and Jacobian Vector Products (JVP)Suppose $f1(x) = W x$ for fixed W, so $J(x) = W$, and $u^T J(x) = W^T u$.Instead of computing $J$ explicitly and then multiplying by $u$, we can do this in one operation.
###Code
n = 3
m = 2
W = jax.random.normal(key, shape=(m, n))
x = jax.random.normal(key, shape=(n,))
u = jax.random.normal(key, shape=(m,))
def f1(x):
return jnp.dot(W, x)
J1 = jacfwd(f1)(x)
print(J1.shape)
assert np.allclose(J1, W)
tmp1 = jnp.dot(u.T, J1)
print(tmp1)
(val, jvp_fun) = jax.vjp(f1, x)
tmp2 = jvp_fun(u)
assert np.allclose(tmp1, tmp2)
tmp3 = np.dot(W.T, u)
assert np.allclose(tmp1, tmp3)
###Output
(2, 3)
[ 0.888 -0.538 -0.539]
###Markdown
Suppose$f2(W) = W x$ for fixed $x$.Now $J(W)$ is a rank-3 tensor (of shape $m \times m \times n$ in the code below), but the VJP is simply $u^T J(W) = u x^T$, which can be computed without materializing $J$.
###Code
def f2(W):
return jnp.dot(W, x)
J2 = jacfwd(f2)(W)
print(J2.shape)
tmp1 = jnp.dot(u.T, J2)
print(tmp1)
print(tmp1.shape)
(val, jvp_fun) = jax.vjp(f2, W)
tmp2 = jvp_fun(u)
assert np.allclose(tmp1, tmp2)
tmp3 = np.outer(u, x)
assert np.allclose(tmp1, tmp3)
###Output
(2, 2, 3)
[[-0.009 -0.032 -0.474]
[ 0.005 0.019 0.286]]
(2, 3)
###Markdown
Stop-gradientSometimes we want to take the gradient of a complex expression wrt some parameters $\theta$, but treating $\theta$ as a constant for some parts of the expression. For example, consider the TD(0) update in reinforcement learning, which has the following form:$\Delta \theta = (r_t + v_{\theta}(s_t) - v_{\theta}(s_{t-1})) \nabla v_{\theta}(s_{t-1})$where $s$ is the state, $r$ is the reward, and $v$ is the value function.This update is not the gradient of any loss function.However, if the dependency of the target $r_t + v_{\theta}(s_t)$ on the parameter $\theta$ is ignored, it can be written as (minus) the gradient of the pseudo loss function$L(\theta) = \frac{1}{2}[r_t + v_{\theta}(s_t) - v_{\theta}(s_{t-1})]^2$since, treating the target as a constant,$\nabla_{\theta} L(\theta) = -[r_t + v_{\theta}(s_t) - v_{\theta}(s_{t-1})] \nabla v_{\theta}(s_{t-1})$, so a gradient-descent step on $L$ moves $\theta$ in the direction of the TD(0) update. We can implement this in JAX using `stop_gradient`, as we show below.
###Code
def td_loss(theta, s_prev, r_t, s_t):
v_prev = value_fn(theta, s_prev)
target = r_t + value_fn(theta, s_t)
return 0.5 * (jax.lax.stop_gradient(target) - v_prev) ** 2
td_update = jax.grad(td_loss)
# An example transition.
s_prev = jnp.array([1.0, 2.0, -1.0])
r_t = jnp.array(1.0)
s_t = jnp.array([2.0, 1.0, 0.0])
# Value function and initial parameters
value_fn = lambda theta, state: jnp.dot(theta, state)
theta = jnp.array([0.1, -0.1, 0.0])
print(td_update(theta, s_prev, r_t, s_t))
###Output
[-1.2 -2.4 1.2]
###Markdown
Straight through estimatorThe straight-through estimator is a trick for defining a 'gradient' of a function that is otherwise non-differentiable. Given a non-differentiable function $f : \mathbb{R}^n \to \mathbb{R}^n$ that is used as part of a larger function that we wish to find a gradient of, we simply pretend during the backward pass that $f$ is the identity function, so gradients pass through $f$ ignoring the $f'$ term. This can be implemented neatly using `jax.lax.stop_gradient`.Here is an example of a non-differentiable function that converts a soft probability distribution to a one-hot vector (discretization).
###Code
def onehot(labels, num_classes):
y = labels[..., None] == jnp.arange(num_classes)[None]
return y.astype(jnp.float32)
def quantize(y_soft):
y_hard = onehot(jnp.argmax(y_soft), 3)[0]
return y_hard
y_soft = np.array([0.1, 0.2, 0.7])
print(quantize(y_soft))
###Output
[0. 0. 1.]
###Markdown
Now suppose we define some linear function of the quantized variable of the form $f(y) = w^T q(y)$. If $w=[1,2,3]$ and $q(y)=[0,0,1]$, we get $f(y) = 3$. But the gradient is 0 because $q$ is not differentiable.
###Code
def f(y):
w = jnp.array([1, 2, 3])
yq = quantize(y)
return jnp.dot(w, yq)
print(f(y_soft))
print(grad(f)(y_soft))
###Output
3.0
[0. 0. 0.]
###Markdown
To use the straight-through estimator, we replace $q(y)$ with $$y + SG(q(y)-y)$$, where SG is stop gradient. In the forward pass, we have $y+q(y)-y=q(y)$. In the backward pass, the gradient of SG is 0, so we effectively replace $q(y)$ with $y$. So in the backward pass we have$$\begin{align}f(y) &= w^T q(y) \approx w^T y \\\nabla_y f(y) &\approx w\end{align}$$
###Code
def f_ste(y):
w = jnp.array([1, 2, 3])
yq = quantize(y)
yy = y + jax.lax.stop_gradient(yq - y) # gives yq on fwd, and y on backward
return jnp.dot(w, yy)
print(f_ste(y_soft))
print(grad(f_ste)(y_soft))
###Output
3.0
[1. 2. 3.]
###Markdown
Per-example gradientsIn some applications, we want to compute the gradient for every example in a batch, not just the sum of gradients over the batch. This is hard in other frameworks like TF and PyTorch but easy in JAX, as we show below.
###Code
def loss(w, x):
return jnp.dot(w, x)
w = jnp.ones((3,))
x0 = jnp.array([1.0, 2.0, 3.0])
x1 = 2 * x0
X = jnp.stack([x0, x1])
print(X.shape)
perex_grads = jax.jit(jax.vmap(jax.grad(loss), in_axes=(None, 0)))
print(perex_grads(w, X))
###Output
(2, 3)
[[1. 2. 3.]
[2. 4. 6.]]
###Markdown
To explain the above code in more depth, note that vmap converts the function loss to take a batch of inputs for each of its arguments, and to return a batch of outputs. To make it work with a single weight vector, we specify in_axes=(None,0), meaning the first argument (w) is not mapped over (it is shared across the batch), and the second argument (x) is mapped over dimension 0.
###Code
gradfn = jax.grad(loss)
W = jnp.stack([w, w])
print(jax.vmap(gradfn)(W, X))
print(jax.vmap(gradfn, in_axes=(None, 0))(w, X))
###Output
[[1. 2. 3.]
[2. 4. 6.]]
[[1. 2. 3.]
[2. 4. 6.]]
###Markdown
OptimizationThe [Optax library](https://github.com/deepmind/optax) implements many common optimizers. Below is a simple example.
###Code
%%capture
!pip install git+git://github.com/deepmind/optax.git
import optax
num_weights = 2
params = {"w": jnp.ones((num_weights,))}
num_ex = 3
xs = 2 * jnp.ones((num_ex, num_weights))
ys = jnp.ones(num_ex)
compute_loss_single = lambda params, x, y: optax.l2_loss(params["w"].dot(x), y)
compute_loss = lambda params, xs, ys: jnp.sum(jax.vmap(compute_loss_single, in_axes=[None, 0, 0])(params, xs, ys))
print("original params ", params)
print("loss ", compute_loss(params, xs, ys))
# create a stateful optimizer
learning_rate = 0.1
optimizer = optax.adam(learning_rate)
opt_state = optimizer.init(params)
print("original state ", opt_state)
# compute gradients
grads = jax.grad(compute_loss)(params, xs, ys)
print("grads ", grads)
# update params (and opt_state) given gradients
updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
print("updated params ", params)
print("updated state ", opt_state)
###Output
original params {'w': DeviceArray([1., 1.], dtype=float32)}
loss 13.5
original state [ScaleByAdamState(count=DeviceArray(0, dtype=int32), mu={'w': DeviceArray([0., 0.], dtype=float32)}, nu={'w': DeviceArray([0., 0.], dtype=float32)}), EmptyState()]
grads {'w': DeviceArray([18., 18.], dtype=float32)}
updated params {'w': DeviceArray([0.9, 0.9], dtype=float32)}
updated state [ScaleByAdamState(count=DeviceArray(1, dtype=int32), mu={'w': DeviceArray([1.8, 1.8], dtype=float32)}, nu={'w': DeviceArray([0.324, 0.324], dtype=float32)}), EmptyState()]
###Markdown
JIT (just in time compilation) In this section, we illustrate how to use the Jax JIT compiler to make code go much faster (even on a CPU). It does this by compiling the computational graph into low-level XLA primitives, potentially fusing multiple sequential operations into a single op. However, it does not work on arbitrary Python code, as we explain below.
###Code
def slow_f(x):
# Element-wise ops see a large benefit from fusion
return x * x + x * 2.0
x = jnp.ones((5000, 5000))
%timeit slow_f(x)
fast_f = jit(slow_f)
%timeit fast_f(x)
assert np.allclose(slow_f(x), fast_f(x))
###Output
100 loops, best of 5: 3.11 ms per loop
The slowest run took 15.74 times longer than the fastest. This could mean that an intermediate result is being cached.
1000 loops, best of 5: 918 µs per loop
###Markdown
We can also add the `@jit` decorator in front of a function.
###Code
@jit
def faster_f(x):
return x * x + x * 2.0
%timeit faster_f(x)
assert np.allclose(faster_f(x), fast_f(x))
###Output
The slowest run took 20.30 times longer than the fastest. This could mean that an intermediate result is being cached.
1000 loops, best of 5: 918 µs per loop
###Markdown
How it works: Jaxprs and tracing In this section, we briefly explain the mechanics behind JIT, which will help you understand when it does not work.First, consider this function.
###Code
def f(x):
y = jnp.ones((1, 5)) * x
return y
###Output
_____no_output_____
###Markdown
When a function is first executed (applied to an argument), it is converted to an intermediate representation called a JAX expression or jaxpr, by a process called tracing, as we show below.
###Code
print(f(3.0))
print(jax.make_jaxpr(f)(3.0))
###Output
[[3. 3. 3. 3. 3.]]
{ lambda ; a.
let b = broadcast_in_dim[ broadcast_dimensions=( )
shape=(1, 5) ] 1.0
c = convert_element_type[ new_dtype=float32
weak_type=False ] a
d = mul b c
in (d,) }
###Markdown
The XLA JIT compiler can then convert the jaxpr to code that runs fast on a CPU, GPU or TPU; the original python code is no longer needed.
###Code
f_jit = jit(f)
print(f_jit(3.0))
###Output
[[3. 3. 3. 3. 3.]]
###Markdown
However, the jaxpr is created by tracing the function for a specific value. If different code is executed depending on the value of the input arguments, the resulting jaxpr will be different, so the function cannot be JITed, as we illustrate below.
###Code
def f(x):
if x > 0:
return x
else:
return 2 * x
print(f(3.0))
f_jit = jit(f)
print(f_jit(3.0))
###Output
3.0
###Markdown
Jit will create a new compiled version for each different ShapedArray, but will reuse the code for different values of the same shape. If the code path depends on the concrete value, we can either just jit a subfunction (whose code path is constant), or we can create a different jaxpr for each concrete value of the input arguments as we explain below. Static argnumNote that JIT compilation requires that the control flow through the function can be determined by the shape (but not concrete value) of its inputs. The function below violates this, since when x>0 it takes one branch, and when x<=0 it takes the other.
###Code
@jit
def f(x):
if x > 0:
return x
else:
return 2 * x
# This will fail!
try:
print(f(3))
except Exception as e:
print("ERROR:", e)
###Output
ERROR: Abstract tracer value encountered where concrete value is expected: Traced<ShapedArray(bool[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)>
The problem arose with the `bool` function.
While tracing the function f at <ipython-input-92-94e6eda28128>:1 for jit, this concrete value was not available in Python because it depends on the value of the argument 'x'.
See https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError
###Markdown
We can fix this by telling JAX to trace the control flow through the function using concrete values of some of its arguments. JAX will then compile different versions, depending on the input values. See below for an example.
###Code
def f(x):
if x > 0:
return x
else:
return 2 * x
f = jit(f, static_argnums=(0,))
print(f(3))
@partial(jit, static_argnums=(0,))
def f(x):
if x > 0:
return x
else:
return 2 * x
print(f(3))
###Output
3
###Markdown
Jit and vmap Unfortunately, the static argnums approach fails when the function is passed to vmap, because vmap supplies abstract tracers for its arguments, so there is no concrete value available to treat as static.
###Code
xs = jnp.arange(5)
@partial(jit, static_argnums=(0,))
def f(x):
if x > 0:
return x
else:
return 2 * x
ys = vmap(f)(xs)
###Output
_____no_output_____
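###Markdown
One way to make this kind of value-dependent branch work under both `jit` and `vmap` is to compute the branch with `jnp.where`, so that no Python-level control flow depends on a traced value. This is a sketch, not part of the original example; `jax.lax.cond` is another option when the branches are expensive to evaluate eagerly:
```
def f_branchless(x):
    return jnp.where(x > 0, x, 2 * x)

ys = vmap(jax.jit(f_branchless))(xs)   # no concrete value needed
```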
###Markdown
Side effects Since the jaxpr is created only once, if your function has global side-effects, such as using print, they will only happen once, even if the function is called multiple times. See example below.
###Code
def f(x):
print("x", x)
y = 2 * x
print("y", y)
return y
y1 = f(2)
print("f", y1)
print("\ncall function a second time")
y1 = f(2)
print("f", y1)
print("\njit version follows")
g = jax.jit(f)
y2 = g(2)
print("f", y2)
print("\ncall jitted function a second time")
y2 = g(2)
print("f", y2)
###Output
x 2
y 4
f 4
call function a second time
x 2
y 4
f 4
jit version follows
x Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)>
y Traced<ShapedArray(int32[], weak_type=True)>with<DynamicJaxprTrace(level=0/1)>
f 4
call jitted function a second time
f 4
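###Markdown
If you do want a message printed on every call of a jitted function, more recent JAX releases provide `jax.debug.print`, which stages the print into the compiled computation. This is an aside and assumes a newer JAX version than the one used in this notebook:
```
@jax.jit
def f_debug(x):
    jax.debug.print("x = {}", x)   # runs on every call, even under jit
    return 2 * x

f_debug(2)
f_debug(3)
```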
###Markdown
CachingIf you write `g=jax.jit(f)`, then f will get compiled and the XLA code will be cached. Subsequent calls to g reuse the cached code for speed. But if the jit is called inside a loop, it is effectively making a new f each time, which is slow. So typically jit occurs in the outermost scope (provided the argument shapes stay constant).Also, if you specify `static_argnums`, then the cached code will be used only for the same values of arguments labelled as static. If any of them change, recompilation occurs. StringsJit does not work with functions that consume or return strings.
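Before the strings example below, here is a quick sketch of the caching point above (the function and array are made-up stand-ins, not from the original notebook):
```
def g(x):
    return x * x + x * 2.0

x_in = jnp.ones((500, 500))

# Slow pattern: a fresh jit wrapper (and a fresh cache) on every iteration
for _ in range(3):
    y = jax.jit(g)(x_in)

# Fast pattern: jit once in the outer scope and reuse the cached compilation
g_jit = jax.jit(g)
for _ in range(3):
    y = g_jit(x_in)
```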
###Code
def f(x: int, y: str):
if y == "add":
return x + 1
else:
return x - 1
print(f(42, "add"))
print(f(42, "sub"))
fj = jax.jit(f)
print(fj(42, "add"))
###Output
43
41
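###Markdown
The jitted call above fails because strings are not valid JAX array types, so they cannot be traced. One workaround, as a sketch using the same `f`, is to mark the string argument as static; strings are hashable, so each distinct value triggers its own compilation:
```
fj = jax.jit(f, static_argnums=(1,))
print(fj(42, "add"))   # 43
print(fj(42, "sub"))   # 41 (compiled separately for y == "sub")
```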
###Markdown
PytreesA Pytree is a container of leaf elements and/or more pytrees. Containers include lists, tuples, and dicts. A leaf element is anything that’s not a pytree, e.g. an array.Pytrees are useful for representing hierarchical sets of parameters for DNNs (and other structured data). Simple example
###Code
from jax import tree_util
# a simple pytree
t1 = [1, {"k1": 2, "k2": (3, 4)}, 5]
print("tree", t1)
leaves = jax.tree_leaves(t1)
print("num leaves", len(leaves))
print(leaves)
t4 = [jnp.array([1, 2, 3]), "foo"]
print("tree", t4)
leaves = jax.tree_leaves(t4)
print("num leaves", len(leaves))
print(leaves)
###Output
tree [1, {'k1': 2, 'k2': (3, 4)}, 5]
num leaves 5
[1, 2, 3, 4, 5]
tree [DeviceArray([1, 2, 3], dtype=int32), 'foo']
num leaves 2
[DeviceArray([1, 2, 3], dtype=int32), 'foo']
###Markdown
TreemapWe can map functions down a pytree in the same way that we can map a function down a list. We can also combine elements in two pytrees that have the same shape to make a third pytree.
###Code
t1 = [1, {"k1": 2, "k2": (3, 4)}, 5]
print(t1)
t2 = tree_util.tree_map(lambda x: x * x, t1)
print("square each element", t2)
t3 = tree_util.tree_map(lambda x, y: x + y, t1, t2)
print("t1+t2", t3)
###Output
[1, {'k1': 2, 'k2': (3, 4)}, 5]
square each element [1, {'k1': 4, 'k2': (9, 16)}, 25]
t1+t2 [2, {'k1': 6, 'k2': (12, 20)}, 30]
###Markdown
If we have a list of dicts, we can convert to a dict of lists, as shown below.
###Code
data = [dict(t=1, obs="a", val=-1), dict(t=2, obs="b", val=-2), dict(t=3, obs="c", val=-3)]
data2 = jax.tree_map(lambda d0, d1, d2: list((d0, d1, d2)), data[0], data[1], data[2])
print(data2)
def join_trees(list_of_trees):
d = jax.tree_map(lambda *xs: list(xs), *list_of_trees)
return d
print(join_trees(data))
###Output
{'obs': ['a', 'b', 'c'], 't': [1, 2, 3], 'val': [-1, -2, -3]}
{'obs': ['a', 'b', 'c'], 't': [1, 2, 3], 'val': [-1, -2, -3]}
###Markdown
Flattening / Unflattening
###Code
t1 = [1, {"k1": 2, "k2": (3, 4)}, 5]
print(t1)
leaves, treedef = jax.tree_util.tree_flatten(t1)
print(leaves)
print(treedef)
t2 = jax.tree_util.tree_unflatten(treedef, leaves)
print(t2)
###Output
[1, {'k1': 2, 'k2': (3, 4)}, 5]
[1, 2, 3, 4, 5]
PyTreeDef([*, {'k1': *, 'k2': (*, *)}, *])
[1, {'k1': 2, 'k2': (3, 4)}, 5]
###Markdown
Example: Linear regression In this section we show how to use pytrees as a container for parameters of a linear regression model. The code is based on the [flax JAX tutorial](https://flax.readthedocs.io/en/latest/notebooks/jax_for_the_impatient.html). When we compute the gradient, it will also be a pytree, and will have the same shape as the parameters, so we can add the params to the gradient without having to flatten and unflatten the parameters.
###Code
# Create the predict function from a set of parameters
def make_predict_pytree(params):
def predict(x):
return jnp.dot(params["W"], x) + params["b"]
return predict
# Create the loss from the data points set
def make_mse_pytree(x_batched, y_batched): # returns fn(params)->real
def mse(params):
# Define the squared loss for a single pair (x,y)
def squared_error(x, y):
y_pred = make_predict_pytree(params)(x)
return jnp.inner(y - y_pred, y - y_pred) / 2.0
# We vectorize the previous to compute the average of the loss on all samples.
return jnp.mean(jax.vmap(squared_error)(x_batched, y_batched), axis=0)
return jax.jit(mse) # And finally we jit the result.
# Set problem dimensions
N = 20
xdim = 10
ydim = 5
# Generate random ground truth W and b
key = random.PRNGKey(0)
Wtrue = random.normal(key, (ydim, xdim))
btrue = random.normal(key, (ydim,))
params_true = {"W": Wtrue, "b": btrue}
true_predict_fun = make_predict_pytree(params_true)
# Generate data with additional observation noise
X = random.normal(key, (N, xdim))
Ytrue = jax.vmap(true_predict_fun)(X)
Y = Ytrue + 0.1 * random.normal(key, (N, ydim))
# Generate MSE for our samples
mse_fun = make_mse_pytree(X, Y)
# Initialize estimated W and b with zeros.
params = {"W": jnp.zeros_like(Wtrue), "b": jnp.zeros_like(btrue)}
mse_pytree = make_mse_pytree(X, Y)
print(mse_pytree(params_true))
print(mse_pytree(params))
print(jax.grad(mse_pytree)(params))
alpha = 0.3 # Gradient step size
print('Loss for "true" W,b: ', mse_pytree(params_true))
for i in range(101):
gradients = jax.grad(mse_pytree)(params)
params = jax.tree_map(lambda old, grad: old - alpha * grad, params, gradients)
if i % 10 == 0:
print("Loss step {}: ".format(i), mse_pytree(params))
print(jax.tree_map(lambda x, y: np.allclose(x, y, atol=1e-1), params, params_true))
###Output
{'W': True, 'b': True}
###Markdown
Compare the above to what the training code would look like if W and b were passed in as separate arguments:```for i in range(101): grad_W = jax.grad(mse_fun,0)(What,bhat) grad_b = jax.grad(mse_fun,1)(What,bhat) What = What - alpha*grad_W bhat = bhat - alpha*grad_b if (i%10==0): print("Loss step {}: ".format(i), mse_fun(What,bhat))``` Example: MLPsWe now show a more interesting example, from the Deepmind tutorial, where we fit an MLP using SGD. The basic structure is similar to the linear regression case.
###Code
# define the model
def init_mlp_params(layer_widths):
params = []
for n_in, n_out in zip(layer_widths[:-1], layer_widths[1:]):
params.append(
dict(weights=np.random.normal(size=(n_in, n_out)) * np.sqrt(2 / n_in), biases=np.ones(shape=(n_out,)))
)
return params
def forward(params, x):
*hidden, last = params
for layer in hidden:
x = jax.nn.relu(x @ layer["weights"] + layer["biases"])
return x @ last["weights"] + last["biases"]
def loss_fn(params, x, y):
return jnp.mean((forward(params, x) - y) ** 2)
# MLP with 2 hidden layers and linear output
np.random.seed(0)
params = init_mlp_params([1, 128, 128, 1])
jax.tree_map(lambda x: x.shape, params)
LEARNING_RATE = 0.0001
@jax.jit
def update(params, x, y):
grads = jax.grad(loss_fn)(params, x, y)
return jax.tree_map(lambda p, g: p - LEARNING_RATE * g, params, grads)
np.random.seed(0)
xs = np.random.normal(size=(200, 1))
ys = xs**2
for _ in range(1000):
params = update(params, xs, ys)
plt.scatter(xs, ys, label="truth")
plt.scatter(xs, forward(params, xs), label="Prediction")
plt.legend()
###Output
_____no_output_____
###Markdown
Looping constructsFor loops in Python are slow, even when JIT-compiled. However, there are built-in primitives for loops that are fast, as we illustrate below. For loops.The semantics of the for loop function in JAX is as follows:```def fori_loop(lower, upper, body_fun, init_val): val = init_val for i in range(lower, upper): val = body_fun(i, val) return val```We see that ```val``` is used to accumulate the results across iterations.Below is an example.
###Code
# sum from 1 to N = N*(N+1)/2
def sum_exact(N):
return int(N * (N + 1) / 2)
def sum_slow(N):
s = 0
for i in range(1, N + 1):
s += i
return s
N = 10
assert sum_slow(N) == sum_exact(N)
def sum_fast(N):
s = jax.lax.fori_loop(1, N + 1, lambda i, partial_sum: i + partial_sum, 0)
return s
assert sum_fast(N) == sum_exact(N)
N = 1000
%timeit sum_slow(N)
%timeit sum_fast(N)
# Let's do more compute per step of the for loop
D = 10
X = jax.random.normal(key, shape=(D, D))
def sum_slow(N):
s = jnp.zeros_like(X)
for i in range(1, N + 1):
s += jnp.dot(X, X)
return s
def sum_fast(N):
s = jnp.zeros_like(X)
s = jax.lax.fori_loop(1, N + 1, lambda i, s: s + jnp.dot(X, X), s)
return s
N = 10
assert np.allclose(sum_fast(N), sum_slow(N))
N = 1000
%timeit sum_slow(N)
%timeit sum_fast(N)
###Output
1 loop, best of 5: 298 ms per loop
10 loops, best of 5: 27.8 ms per loop
###Markdown
While loopsHere is the semantics of the JAX while loop```def while_loop(cond_fun, body_fun, init_val): val = init_val while cond_fun(val): val = body_fun(val) return val```Below is an example.
###Code
def sum_slow_while(N):
s = 0
i = 0
while i <= N:
s += i
i += 1
return s
def sum_fast_while(N):
init_val = (0, 0)
def cond_fun(val):
s, i = val
return i <= N
def body_fun(val):
s, i = val
s += i
i += 1
return (s, i)
val = jax.lax.while_loop(cond_fun, body_fun, init_val)
s2 = val[0]
return s2
N = 10
assert sum_slow_while(N) == sum_exact(N)
assert sum_slow_while(N) == sum_fast_while(N)
N = 1000
%timeit sum_slow(N)
%timeit sum_fast(N)
###Output
1 loop, best of 5: 312 ms per loop
10 loops, best of 5: 28.3 ms per loop
###Markdown
ScanHere is the semantics of scan:```def scan(f, init, xs, length=None): if xs is None: xs = [None] * length carry = init ys = [] for x in xs: carry, y = f(carry, x) ys.append(y) return carry, np.stack(ys) ``` Here is an example where we use scan to sample from a discrete-time, discrete-state Markov chain.
###Code
init_dist = jnp.array([0.8, 0.2])
trans_mat = jnp.array([[0.9, 0.1], [0.5, 0.5]])
rng_key = jax.random.PRNGKey(0)
from jax.scipy.special import logit
seq_len = 15
initial_state = jax.random.categorical(rng_key, logits=logit(init_dist), shape=(1,))
def draw_state(prev_state, key):
logits = logit(trans_mat[:, prev_state])
state = jax.random.categorical(key, logits=logits.flatten(), shape=(1,))
return state, state
rng_key, rng_state, rng_obs = jax.random.split(rng_key, 3)
keys = jax.random.split(rng_state, seq_len - 1)
final_state, states = jax.lax.scan(draw_state, initial_state, keys)
print(states)
###Output
[[0]
[0]
[0]
[0]
[0]
[0]
[0]
[1]
[1]
[1]
[1]
[1]
[1]
[1]]
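###Markdown
As a simpler illustration of `scan` (a sketch, not part of the original example), here is a cumulative sum where the carry is the running total and the per-step output is the partial sum:
```
def cumsum_step(carry, x):
    carry = carry + x
    return carry, carry          # (new carry, output for this step)

total, partials = jax.lax.scan(cumsum_step, 0.0, jnp.arange(1.0, 6.0))
# total == 15.0, partials == [1., 3., 6., 10., 15.]
```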
###Markdown
Common gotchas Handling stateIn this section, we discuss how to transform code that uses object-oriented programming (which can be stateful) to pure functional programming, which is stateless, as required by JAX. Our presentation is based on the Deepmind tutorial. To start, consider a simple class that maintains an internal counter, and when called, increments the counter and returns the next number from some sequence.
###Code
# import string
# DICTIONARY = list(string.ascii_lowercase)
SEQUENCE = jnp.arange(0, 100, 2)
class Counter:
def __init__(self):
self.n = 0
def count(self) -> int:
# res = DICTIONARY[self.n]
res = SEQUENCE[self.n]
self.n += 1
return res
def reset(self):
self.n = 0
counter = Counter()
for _ in range(3):
print(counter.count())
###Output
0
2
4
###Markdown
The trouble with the above code is that the call to `count` depends on the internal state of the object (the value `n`), even though this is not an argument to the function. (The code is therefore said to violate 'referential transparency'.) When we Jit compile it, Jax will only call the code once (to convert to a jaxpr), so the side effect of updating `n` will not happen, resulting in incorrect behavior, as we show below.
###Code
counter.reset()
fast_count = jax.jit(counter.count)
for _ in range(3):
print(fast_count())
###Output
0
0
0
###Markdown
We can solve this problem by passing the state as an argument into the function.
###Code
CounterState = int
Result = int
class CounterV2:
def count(self, n: CounterState) -> Tuple[Result, CounterState]:
return SEQUENCE[n], n + 1
def reset(self) -> CounterState:
return 0
counter = CounterV2()
state = counter.reset()
for _ in range(3):
value, state = counter.count(state)
print(value)
###Output
0
2
4
###Markdown
This version is functionally pure, so jit-compiles nicely.
###Code
state = counter.reset()
fast_count = jax.jit(counter.count)
for _ in range(3):
value, state = fast_count(state)
print(value)
###Output
0
2
4
###Markdown
We can apply the same process to any stateful method to convert it into a stateless one. We took a class of the form```class StatefulClass state: State def stateful_method(*args, **kwargs) -> Output:```and turned it into a class of the form```class StatelessClass def stateless_method(state: State, *args, **kwargs) -> (Output, State):```This is a common [functional programming](https://en.wikipedia.org/wiki/Functional_programming) pattern, and, essentially, is the way that state is handled in all JAX programs (as we saw with the way Jax handles random number state, or parameters of a model that get updated).Note that the stateless version of the code no longer needs to use a class, but can instead group the functions into a common namespace using modules.In some cases (eg when working with DNNs), it is more convenient to write code in an OO way. There are several libraries (notably [Flax](https://github.com/google/flax) and [Haiku](https://github.com/deepmind/dm-haiku)) that let you define a model in an OO way, and then generate functionally pure code. Mutation of arrays Since JAX is functional, you cannot mutate arrays in place,since this makes program analysis and transformation very difficult. JAX requires a pure functional expression of a numerical program.Instead, JAX offers the functional update functions: `index_update`, `index_add`, `index_min`, `index_max`, and the `index` helper. These are illustrated below. However it is best to avoid these if possible, since they are slow.Note: If the input values of `index_update` aren't reused, jit-compiled code will perform these operations in-place, rather than making a copy.
###Code
# You cannot assign directly to elements of an array.
A = jnp.zeros((3, 3), dtype=np.float32)
# In place update of JAX's array will yield an error!
try:
A[1, :] = 1.0
except:
print("must use index_update")
from jax.ops import index, index_add, index_update
D = 3
A = 2 * jnp.ones((D, D))
print("original array:")
print(A)
A2 = index_update(A, index[1, :], 42.0) # A[1,:] = 42
print("original array:")
print(A) # unchanged
print("new array:")
print(A2)
A3 = A.at[1, :].set(42.0) # A3=np.copy(A), A3[1,:] = 42
print("original array:")
print(A) # unchanged
print("new array:")
print(A3)
A4 = A.at[1, :].mul(42.0) # A4=np.copy(A), A4[1,:] *= 42
print("original array:")
print(A) # unchanged
print("new array:")
print(A4)
###Output
original array:
[[2. 2. 2.]
[2. 2. 2.]
[2. 2. 2.]]
original array:
[[2. 2. 2.]
[2. 2. 2.]
[2. 2. 2.]]
new array:
[[ 2. 2. 2.]
[42. 42. 42.]
[ 2. 2. 2.]]
original array:
[[2. 2. 2.]
[2. 2. 2.]
[2. 2. 2.]]
new array:
[[ 2. 2. 2.]
[42. 42. 42.]
[ 2. 2. 2.]]
original array:
[[2. 2. 2.]
[2. 2. 2.]
[2. 2. 2.]]
new array:
[[ 2. 2. 2.]
[84. 84. 84.]
[ 2. 2. 2.]]
###Markdown
Implicitly casting lists to vectorsYou cannot treat a list of numbers as a vector. Instead you must explicitly create the vector using the np.array() constructor.
###Code
# You cannot treat a list of numbers as a vector.
try:
S = jnp.diag([1.0, 2.0, 3.0])
except:
print("must convert indices to np.array")
# Instead you should explicitly construct the vector.
S = jnp.diag(jnp.array([1.0, 2.0, 3.0]))
###Output
_____no_output_____ |
graphs_trees/bst_validate/bst_validate_solution.ipynb | ###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Determine if a tree is a valid binary search tree.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Can the tree have duplicates? * Yes* If this is called on a None input, should we raise an exception? * Yes* Can we assume we already have a Node class? * Yes* Can we assume this fits in memory? * Yes Test CasesNone -> exceptionValid: 5 / \ 5 8 / \ / 4 6 7Invalid: 5 / \ 5 8 \ 20 AlgorithmWe'll use a recursive solution that validates left <= current < right, passing down the min and max values as we do a depth-first traversal.* If the node is None, return True* If min is set and the node's value <= min, return False* if max is set and the node's value > max, return False* Recursively call the validate function on node.left, updating max* Recursively call the validate function on node.right, updating min Complexity:* Time: O(n)* Space: O(h), where h is the height of the tree Code
###Code
%run ../bst/bst.py
import sys
class BstValidate(Bst):
def validate(self):
if self.root is None:
raise TypeError('No root node')
return self._validate(self.root)
def _validate(self, node, minimum=-sys.maxsize, maximum=sys.maxsize):
if node is None:
return True
if node.data <= minimum or node.data > maximum:
return False
if not self._validate(node.left, minimum, node.data):
return False
if not self._validate(node.right, node.data, maximum):
return False
return True
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_bst_validate.py
from nose.tools import assert_equal
from nose.tools import raises
class TestBstValidate(object):
@raises(Exception)
def test_bst_validate_empty(self):
bst = BstValidate(None)
bst.validate()
def test_bst_validate(self):
bst = BstValidate(Node(5))
bst.insert(8)
bst.insert(5)
bst.insert(6)
bst.insert(4)
bst.insert(7)
assert_equal(bst.validate(), True)
bst = BstValidate(Node(5))
left = Node(5)
right = Node(8)
invalid = Node(20)
bst.root.left = left
bst.root.right = right
bst.root.left.right = invalid
assert_equal(bst.validate(), False)
print('Success: test_bst_validate')
def main():
test = TestBstValidate()
test.test_bst_validate_empty()
test.test_bst_validate()
if __name__ == '__main__':
main()
%run -i test_bst_validate.py
###Output
Success: test_bst_validate
###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Determine if a tree is a valid binary search tree.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Can the tree have duplicates? * Yes* If this is called on a None input, should we raise an exception? * Yes* Can we assume we already have a Node class? * Yes* Can we assume this fits in memory? * Yes Test CasesNone -> exceptionValid: 5 / \ 5 8 / / 4 6 \ 7 Invalid: 5 / \ 5 8 \ 20 AlgorithmWe'll use a recursive solution that validates left <= current < right, passing down the min and max values as we do a depth-first traversal.* If the node is None, return True* If min is set and the node's value <= min, return False* if max is set and the node's value > max, return False* Recursively call the validate function on node.left, updating max* Recursively call the validate function on node.right, updating min Complexity:* Time: O(n)* Space: O(h), where h is the height of the tree Code
###Code
%run ../bst/bst.py
import sys
class BstValidate(Bst):
def validate(self):
if self.root is None:
raise TypeError('No root node')
return self._validate(self.root)
def _validate(self, node, minimum=-sys.maxsize, maximum=sys.maxsize):
if node is None:
return True
if node.data <= minimum or node.data > maximum:
return False
if not self._validate(node.left, minimum, node.data):
return False
if not self._validate(node.right, node.data, maximum):
return False
return True
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_bst_validate.py
from nose.tools import assert_equal
from nose.tools import raises
class TestBstValidate(object):
@raises(Exception)
def test_bst_validate_empty(self):
bst = BstValidate(None)
bst.validate()
def test_bst_validate(self):
bst = BstValidate(Node(5))
bst.insert(8)
bst.insert(5)
bst.insert(6)
bst.insert(4)
bst.insert(7)
assert_equal(bst.validate(), True)
bst = BstValidate(Node(5))
left = Node(5)
right = Node(8)
invalid = Node(20)
bst.root.left = left
bst.root.right = right
bst.root.left.right = invalid
assert_equal(bst.validate(), False)
print('Success: test_bst_validate')
def main():
test = TestBstValidate()
test.test_bst_validate_empty()
test.test_bst_validate()
if __name__ == '__main__':
main()
%run -i test_bst_validate.py
###Output
Success: test_bst_validate
###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Determine if a tree is a valid binary search tree.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Can the tree have duplicates? * Yes* If this is called on a None input, should we raise an exception? * Yes* Can we assume we already have a Node class? * Yes* Can we assume this fits in memory? * Yes Test CasesNone -> exceptionValid: 5 / \ 5 8 / / 4 6 \ 7 Invalid: 5 / \ 5 8 \ 20 AlgorithmWe'll use a recursive solution that validates left <= current < right, passing down the min and max values as we do a depth-first traversal.* If the node is None, return True* If min is set and the node's value <= min, return False* if max is set and the node's value > max, return False* Recursively call the validate function on node.left, updating max* Recursively call the validate function on node.right, updating min Complexity:* Time: O(n)* Space: O(h), where h is the height of the tree Code
###Code
%run ../bst/bst.py
import sys
class BstValidate(Bst):
def validate(self):
if self.root is None:
raise TypeError('No root node')
return self._validate(self.root)
def _validate(self, node, minimum=-sys.maxsize, maximum=sys.maxsize):
if node is None:
return True
if node.data <= minimum or node.data > maximum:
return False
if not self._validate(node.left, minimum, node.data):
return False
if not self._validate(node.right, node.data, maximum):
return False
return True
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_bst_validate.py
import unittest
class TestBstValidate(unittest.TestCase):
def test_bst_validate_empty(self):
bst = BstValidate(None)
bst.validate()
def test_bst_validate(self):
bst = BstValidate(Node(5))
bst.insert(8)
bst.insert(5)
bst.insert(6)
bst.insert(4)
bst.insert(7)
self.assertEqual(bst.validate(), True)
bst = BstValidate(Node(5))
left = Node(5)
right = Node(8)
invalid = Node(20)
bst.root.left = left
bst.root.right = right
bst.root.left.right = invalid
self.assertEqual(bst.validate(), False)
print('Success: test_bst_validate')
def main():
test = TestBstValidate()
test.assertRaises(TypeError, test.test_bst_validate_empty)
test.test_bst_validate()
if __name__ == '__main__':
main()
%run -i test_bst_validate.py
###Output
Success: test_bst_validate
###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Determine if a tree is a valid binary search tree.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Can the tree have duplicates? * Yes* Can we assume we already have a Node class? * Yes Test CasesValid: 5 / \ 5 8 / \ / 4 6 7Invalid: 5 / \ 5 8 \ 20 AlgorithmWe'll use a recursive solution that validates left <= current < right, passing down the min and max values as we do a depth-first traversal.* If the node is None, return True* If min is set and the node's value <= min, return False* if max is set and the node's value > max, return False* Recursively call the validate function on node.left, updating max* Recursively call the validate function on node.right, updating min Complexity:* Time: O(n)* Space: O(h), where h is the height of the tree Code
###Code
%run ../bst/bst.py
def validate_bst(node):
return __validate_bst__(node, None, None)
def __validate_bst__(node, minimum, maximum):
if node is None:
return True
if minimum is not None and node.data <= minimum:
return False
if maximum is not None and node.data > maximum:
return False
if not __validate_bst__(node.left, minimum, node.data):
return False
if not __validate_bst__(node.right, node.data, maximum):
return False
return True
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_bst_validate.py
from nose.tools import assert_equal
class TestBstValidate(object):
def test_bst_validate(self):
node = Node(5)
insert(node, 8)
insert(node, 5)
insert(node, 6)
insert(node, 4)
insert(node, 7)
assert_equal(validate_bst(node), True)
root = Node(5)
left = Node(5)
right = Node(8)
invalid = Node(20)
root.left = left
root.right = right
root.left.right = invalid
assert_equal(validate_bst(root), False)
print('Success: test_bst_validate')
def main():
test = TestBstValidate()
test.test_bst_validate()
if __name__ == '__main__':
main()
%run -i test_bst_validate.py
###Output
Success: test_bst_validate
###Markdown
This notebook was prepared by [Donne Martin](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). Solution Notebook Problem: Determine if a tree is a valid binary search tree.* [Constraints](Constraints)* [Test Cases](Test-Cases)* [Algorithm](Algorithm)* [Code](Code)* [Unit Test](Unit-Test) Constraints* Can the tree have duplicates? * Yes* If this is called on a None input, should we raise an exception? * Yes* Can we assume we already have a Node class? * Yes* Can we assume this fits in memory? * Yes Test CasesNone -> exceptionValid: 5 / \ 5 8 / \ / 4 6 7Invalid: 5 / \ 5 8 \ 20 AlgorithmWe'll use a recursive solution that validates left <= current < right, passing down the min and max values as we do a depth-first traversal.* If the node is None, return True* If min is set and the node's value <= min, return False* if max is set and the node's value > max, return False* Recursively call the validate function on node.left, updating max* Recursively call the validate function on node.right, updating min Complexity:* Time: O(n)* Space: O(h), where h is the height of the tree Code
###Code
%run ../bst/bst.py
import sys
class BstValidate(Bst):
def validate(self):
if self.root is None:
raise Exception('No root node')
return self._validate(self.root)
def _validate(self, node, minimum=-sys.maxsize, maximum=sys.maxsize):
if node is None:
return True
if node.data <= minimum or node.data > maximum:
return False
if not self._validate(node.left, minimum, node.data):
return False
if not self._validate(node.right, node.data, maximum):
return False
return True
###Output
_____no_output_____
###Markdown
Unit Test
###Code
%%writefile test_bst_validate.py
from nose.tools import assert_equal
from nose.tools import raises
class TestBstValidate(object):
@raises(Exception)
def test_bst_validate_empty(self):
bst = BstValidate(None)
bst.validate()
def test_bst_validate(self):
bst = BstValidate(Node(5))
bst.insert(8)
bst.insert(5)
bst.insert(6)
bst.insert(4)
bst.insert(7)
assert_equal(bst.validate(), True)
bst = BstValidate(Node(5))
left = Node(5)
right = Node(8)
invalid = Node(20)
bst.root.left = left
bst.root.right = right
bst.root.left.right = invalid
assert_equal(bst.validate(), False)
print('Success: test_bst_validate')
def main():
test = TestBstValidate()
test.test_bst_validate_empty()
test.test_bst_validate()
if __name__ == '__main__':
main()
%run -i test_bst_validate.py
###Output
Success: test_bst_validate
|
tutorials/tutorial01.ipynb | ###Markdown
CE9010: Introduction to Data Analysis Semester 2 2018/19 Xavier Bresson Tutorial 1: Introduction to Python Objective $\bullet$ Basic operations in Python 1. ResourcesSlides: [Python introduction by Xavier Bresson][Python introduction by Xavier Bresson]: http://data-science-training-xb.s3-website.eu-west-2.amazonaws.com/All_lectures/Lecture02_python.pdfNotebook: [Python introduction tutorial by Justin Johnson][Python introduction tutorial by Justin Johnson]: https://github.com/kuleshov/cs228-material/blob/master/tutorials/python/cs228-python-tutorial.ipynb 2. Basic operations 2.1 Elementary algebra operations
###Code
5+6
5/8
5**8
###Output
_____no_output_____
###Markdown
2.2 Logical operations
###Code
1==1
2==1
2!=1
True & False
True | False
###Output
_____no_output_____
###Markdown
2.3 Assignment operations
###Code
x=3
print(x)
x='hello'
print(x)
x=(1==2)
print(x)
x=2.35789202950400
print('x={:2.5f}, x={:.1f}'.format(x,x))
x=2.35789202950400
print(type(x))
x='hello'
print(type(x))
###Output
<class 'str'>
###Markdown
2.4 NumpyList all library modules: np. + tabList properties of the module: np.abs + shift + tab
###Code
import numpy as np
#np.abs()
x = np.pi
print(x)
print(np.ones((5,2,4)))
print(np.zeros(5))
print(np.eye(5))
print(np.random.normal(0,1))
print(np.random.uniform(0,1))
print(np.random.randint(10))
print(np.array(range(10)))
print(np.random.permutation(range(10)))
x = np.array(x,dtype='float32')
print(x,type(x),x.dtype)
x = np.array(3.4,dtype='int64')
print(x,type(x),x.dtype)
X = np.array([[1,2,3],[4,5,6]])
print(X)
print(X.shape)
print(X.shape[0])
print(X.shape[1])
print(X)
print(X[0,2])
print(X[0,:])
print(X[0,0:2])
print(X[0,:2])
print(X[0,1:3])
print(X[-1,-1])
X[0,:] = [7,8,9] # assignment
print(X)
X = np.array([[1,2,3],[4,5,6]])
X = np.append(X,[[7,8,9]],axis=0) # append
print(X)
print(X.shape)
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
(3, 3)
###Markdown
3. Load and save data
###Code
pwd
ls -al
cd ..
pwd
cd tutorials
data = np.loadtxt('data/profit_population.txt', delimiter=',')
print(data)
print(data.shape)
print(data.dtype)
new_data = 2* data
np.savetxt('data/profit_population_new.txt', new_data, delimiter=',', fmt='%2.5f')
%whos
###Output
Variable Type Data/Info
-------------------------------
X ndarray 3x3: 9 elems, type `int64`, 72 bytes
data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
new_data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
np module <module 'numpy' from '/Us<...>kages/numpy/__init__.py'>
x ndarray : 1 elems, type `int64`, 8 bytes
###Markdown
4. Linear algebra operations
###Code
X = np.array([[1,2,3],[4,5,6]])
print(X,X.shape)
Y = np.array([[2,7,-2],[1,8,3]])
print(Y,Y.shape)
Z = Y.T #transpose
print(Z,Z.shape)
Z = X* Y # element-wise matrix multiplication
print(Z,Z.shape)
Z = X.dot(Y.T) # matrix multiplication
print(Z,Z.shape)
Z = X**2
print(Z,Z.shape)
Z = 1/X
print(Z,Z.shape)
Z = np.log(X)
print(Z,Z.shape)
Z = X + 1
print(Z,Z.shape)
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.max(X))
print(np.max(X,axis=0))
print(np.max(X,axis=1))
print(np.argmax(X))
print(np.argmax(X,axis=0))
print(np.argmax(X,axis=1))
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.sum(X))
print(np.sum(X,axis=0))
print(np.sum(X,axis=1))
###Output
[[ 2 7 -2]
[ 1 8 3]] (2, 3)
19
[ 3 15 1]
[ 7 12]
###Markdown
5. Plotting data
###Code
# Visualization library
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png2x','pdf')
import matplotlib.pyplot as plt
x = np.linspace(0,6*np.pi,100)
#print(x)
y = np.sin(x)
plt.figure(1)
plt.plot(x, y,label='sin'.format(i=1))
plt.legend(loc='best')
plt.title('Sin plotting')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
###Output
_____no_output_____
###Markdown
6. Control statements 6.1 Function
###Code
def f(x):
return x**2
x = 3
y = f(x)
print(y)
def g(x,y):
return x**2, y**3
x = 3
y = 5
u,v = g(x,y)
print(u,v)
###Output
9 125
###Markdown
6.2 Logical control statements
###Code
for i in range(10):
print(i)
i = 0
while (i<10):
i += 1
print(i)
i = True
if i==True:
print(True)
i = False
if i==True:
print(True)
elif i==False:
print(False)
###Output
True
False
###Markdown
7. Vectorization and efficient linear algebra computations 7.1 No vectorization
###Code
import time
n = 10**7
x = np.linspace(0,1,n)
y = np.linspace(0,2,n)
start = time.time()
z = 0
for i in range(len(x)):
z += x[i]*y[i]
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666666.999999491
Time= 4.217767953872681
###Markdown
7.2 Vectorization
###Code
start = time.time()
z = x.T.dot(y)
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666667.000000028
Time= 0.007534980773925781
###Markdown
CE9010: Introduction to Data Analysis Semester 2 2018/19 Xavier Bresson Tutorial 1: Introduction to Python Objective $\bullet$ Basic operations in Python 1. ResourcesSlides: [Python introduction by Xavier Bresson][Python introduction by Xavier Bresson]: http://data-science-training-xb.s3-website.eu-west-2.amazonaws.com/All_lectures/Lecture02_python.pdfNotebook: [Python introduction tutorial by Justin Johnson][Python introduction tutorial by Justin Johnson]: https://github.com/kuleshov/cs228-material/blob/master/tutorials/python/cs228-python-tutorial.ipynb 2. Basic operations 2.1 Elementary algebra operations
###Code
5+6
5/8
5**8
###Output
_____no_output_____
###Markdown
2.2 Logical operations
###Code
1==1
2==1
2!=1
True & False
True | False
###Output
_____no_output_____
###Markdown
2.3 Assignment operations
###Code
x=3
print(x)
x='hello'
print(x)
x=(1==2)
print(x)
x=2.35789202950400
print('x={:2.5f}, x={:.1f}'.format(x,x))
x=2.35789202950400
print(type(x))
x='hello'
print(type(x))
###Output
<class 'str'>
###Markdown
2.4 NumpyList all library modules: np. + tabList properties of the module: np.abs + shift + tab
###Code
import numpy as np
#np.abs()
x = np.pi
print(x)
print(np.ones((5,2)))
print(np.zeros((5)))
print(np.zeros((5,1)).shape)
print(np.eye(5))
print(np.random.normal(0,1))
print(np.random.uniform(0,1))
print(np.random.randint(10))
print(np.array(range(10)))
print(np.random.permutation(range(10)))
x = np.array(x,dtype='float32')
print(x,type(x),x.dtype)
x = np.array(3.4,dtype='int64')
print(x,type(x),x.dtype)
X = np.array([[1,2,3],[4,5,6]])
print(X)
print(X.shape)
print(X.shape[0])
print(X.shape[1])
print(X)
print(X[0,2])
print(X[0,:])
print(X[0,0:2])
print(X[0,:2])
print(X[0,1:3])
print(X[-1,-1])
X[0,:] = [7,8,9] # assignment
print(X)
X = np.array([[1,2,3],[4,5,6]])
X = np.append(X,[[7,8,9]],axis=0) # append
print(X)
print(X.shape)
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
(3, 3)
###Markdown
3. Load and save data
###Code
pwd
ls -al
cd ..
pwd
cd tutorials
data = np.loadtxt('data/profit_population.txt', delimiter=',')
print(data)
print(data.shape)
print(data.dtype)
new_data = 2* data
np.savetxt('data/profit_population_new.txt', new_data, delimiter=',', fmt='%2.5f')
%whos
###Output
Variable Type Data/Info
-------------------------------
X ndarray 3x3: 9 elems, type `int64`, 72 bytes
data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
new_data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
np module <module 'numpy' from '/Us<...>kages/numpy/__init__.py'>
x ndarray : 1 elems, type `int64`, 8 bytes
###Markdown
4. Linear algebra operations
###Code
X = np.array([[1,2,3],[4,5,6]])
print(X,X.shape)
Y = np.array([[2,7,-2],[1,8,3]])
print(Y,Y.shape)
Z = Y.T #transpose
print(Z,Z.shape)
Z = X* Y # element-wise matrix multiplication
print(Z,Z.shape)
print(X.shape)
print(Y.T.shape)
Z = X.dot(Y.T) # matrix multiplication
print(Z,Z.shape)
Z = X**2
print(Z,Z.shape)
Z = 1/X
print(Z,Z.shape)
Z = np.log(X)
print(Z,Z.shape)
Z = X + 1
print(Z,Z.shape)
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.max(X))
print(np.max(X,axis=0))
print(np.max(X,axis=1))
print(np.argmax(X))
print(np.argmax(X,axis=0))
print(np.argmax(X,axis=1))
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.sum(X))
print(np.sum(X,axis=0))
print(np.sum(X,axis=1))
###Output
[[ 2 7 -2]
[ 1 8 3]] (2, 3)
19
[ 3 15 1]
[ 7 12]
###Markdown
5. Plotting data
###Code
# Visualization library
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png2x','pdf')
import matplotlib.pyplot as plt
x = np.linspace(0,6*np.pi,100)
#print(x)
y = np.sin(x)
plt.figure(1)
plt.plot(x, y,label='sin'.format(i=1))
plt.legend(loc='best')
plt.title('Sin plotting')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
###Output
_____no_output_____
###Markdown
6. Control statements 6.1 Function
###Code
def f(x):
return x**2
x = 3
y = f(x)
print(y)
def g(x,y):
return x**2, y**3
x = 3
y = 5
u,v = g(x,y)
print(u,v)
###Output
9 125
###Markdown
6.2 Logical control statements
###Code
for i in range(10):
print(i)
i = 0
while (i<10):
i += 1
print(i)
i = True
if i==True:
print(True)
i = False
if i==True:
print(True)
elif i==False:
print(False)
###Output
True
False
###Markdown
7. Vectorization and efficient linear algebra computations 7.1 No vectorization
###Code
import time
n = 10**7
x = np.linspace(0,1,n)
y = np.linspace(0,2,n)
start = time.time()
z = 0
for i in range(len(x)):
z += x[i]*y[i]
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666666.999999491
Time= 5.725438833236694
###Markdown
7.2 Vectorization
###Code
start = time.time()
z = x.T.dot(y)
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666667.00000005
Time= 0.00741887092590332
###Markdown
CE9010: Introduction to Data Analysis Semester 2 2018/19 Xavier Bresson Tutorial 1: Introduction to Python Objective $\bullet$ Basic operations in Python 1. ResourcesSlides: [Python introduction by Xavier Bresson][Python introduction by Xavier Bresson]: http://data-science-training-xb.s3-website.eu-west-2.amazonaws.com/All_lectures/Lecture02_python.pdfNotebook: [Python introduction tutorial by Justin Johnson][Python introduction tutorial by Justin Johnson]: https://github.com/kuleshov/cs228-material/blob/master/tutorials/python/cs228-python-tutorial.ipynb 2. Basic operations 2.1 Elementary algebra operations
###Code
5+6
5/8
5**8
###Output
_____no_output_____
###Markdown
2.2 Logical operations
###Code
1==1
2==1
2!=1
True & False
True | False
###Output
_____no_output_____
###Markdown
2.3 Assignment operations
###Code
x=3
print(x)
x='hello'
print(x)
x=(1==2)
print(x)
x=2.35789202950400
print('x={:2.5f}, x={:.1f}'.format(x,x))
x=2.35789202950400
print(type(x))
x='hello'
print(type(x))
###Output
<class 'str'>
###Markdown
2.4 NumpyList all library modules: np. + tabList properties of the module: np.abs + shift + tab
###Code
import numpy as np
#np.abs()
x = np.pi
print(x)
print(np.ones((5,2)))
print(np.zeros((5)))
print(np.zeros((5,1)).shape)
print(np.eye(5))
print(np.random.normal(0,1))
print(np.random.uniform(0,1))
print(np.random.randint(10))
print(np.array(range(10)))
print(np.random.permutation(range(10)))
x = np.array(x,dtype='float32')
print(x,type(x),x.dtype)
x = np.array(3.4,dtype='int64')
print(x,type(x),x.dtype)
X = np.array([[1,2,3],[4,5,6]])
print(X)
print(X.shape)
print(X.shape[0])
print(X.shape[1])
print(X)
print(X[0,2])
print(X[0,:])
print(X[0,0:2])
print(X[0,:2])
print(X[0,1:3])
print(X[-1,-1])
X[0,:] = [7,8,9] # assignment
print(X)
X = np.array([[1,2,3],[4,5,6]])
X = np.append(X,[[7,8,9]],axis=0) # append
print(X)
print(X.shape)
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
(3, 3)
###Markdown
3. Load and save data
###Code
pwd
ls -al
cd ..
pwd
cd tutorials
data = np.loadtxt('data/profit_population.txt', delimiter=',')
print(data)
print(data.shape)
print(data.dtype)
new_data = 2* data
np.savetxt('data/profit_population_new.txt', new_data, delimiter=',', fmt='%2.5f')
%whos
###Output
Variable Type Data/Info
-------------------------------
X ndarray 2x3: 6 elems, type `int64`, 48 bytes
data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
np module <module 'numpy' from '/Us<...>kages/numpy/__init__.py'>
x ndarray : 1 elems, type `float32`, 4 bytes
###Markdown
4. Linear algebra operations
###Code
X = np.array([[1,2,3],[4,5,6]])
print(X,X.shape)
Y = np.array([[2,7,-2],[1,8,3]])
print(Y,Y.shape)
Z = Y.T #transpose
print(Z,Z.shape)
Z = X* Y # element-wise matrix multiplication
print(Z,Z.shape)
print(X.shape)
print(Y.T.shape)
Z = X.dot(Y.T) # matrix multiplication
print(Z,Z.shape)
Z = X**2
print(Z,Z.shape)
Z = 1/X
print(Z,Z.shape)
Z = np.log(X)
print(Z,Z.shape)
Z = X + 1
print(Z,Z.shape)
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.max(X))
print(np.max(X,axis=0))
print(np.max(X,axis=1))
print(np.argmax(X))
print(np.argmax(X,axis=0))
print(np.argmax(X,axis=1))
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.sum(X))
print(np.sum(X,axis=0))
print(np.sum(X,axis=1))
###Output
[[ 2 7 -2]
[ 1 8 3]] (2, 3)
19
[ 3 15 1]
[ 7 12]
###Markdown
5. Plotting data
###Code
# Visualization library
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png2x','pdf')
import matplotlib.pyplot as plt
x = np.linspace(0,6*np.pi,100)
#print(x)
y = np.sin(x)
plt.figure(1)
plt.plot(x, y,label='sin'.format(i=1))
plt.legend(loc='best')
plt.title('Sin plotting')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
###Output
_____no_output_____
###Markdown
6. Control statements 6.1 Function
###Code
def f(x):
return x**2
x = 3
y = f(x)
print(y)
def g(x,y):
return x**2, y**3
x = 3
y = 5
u,v = g(x,y)
print(u,v)
###Output
9 125
###Markdown
6.2 Logical control statements
###Code
for i in range(10):
print(i)
i = 0
while (i<10):
i += 1
print(i)
i = True
if i==True:
print(True)
i = False
if i==True:
print(True)
elif i==False:
print(False)
###Output
True
False
###Markdown
7. Vectorization and efficient linear algebra computations 7.1 No vectorization
###Code
import time
n = 10**7
x = np.linspace(0,1,n)
y = np.linspace(0,2,n)
start = time.time()
z = 0
for i in range(len(x)):
z += x[i]*y[i]
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666666.999999491
Time= 4.217767953872681
###Markdown
7.2 Vectorization
###Code
start = time.time()
z = x.T.dot(y)
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666667.000000028
Time= 0.007534980773925781
###Markdown
CE9010: Introduction to Data Science Semester 2 2017/18 Xavier Bresson Tutorial 1: Introduction to Python Objective $\bullet$ Basic operations in Python 1. ResourcesSlides: [Python introduction by Xavier Bresson][Python introduction by Xavier Bresson]: http://data-science-training-xb.s3-website.eu-west-2.amazonaws.com/All_lectures/Lecture02_python.pdfNotebook: [Python introduction tutorial by Justin Johnson][Python introduction tutorial by Justin Johnson]: https://github.com/kuleshov/cs228-material/blob/master/tutorials/python/cs228-python-tutorial.ipynb 2. Basic operations 2.1 Elementary algebra operations
###Code
5+6
5/8
5**8
###Output
_____no_output_____
###Markdown
2.2 Logical operations
###Code
1==1
2==1
2!=1
True & False
True | False
###Output
_____no_output_____
###Markdown
2.3 Assignment operations
###Code
x=3
print(x)
x='hello'
print(x)
x=(1==2)
print(x)
x=2.35789202950400
print('x={:2.5f}, x={:.1f}'.format(x,x))
x=2.35789202950400
print(type(x))
x='hello'
print(type(x))
###Output
<class 'str'>
###Markdown
2.4 NumpyList all library modules: np. + tabList properties of the module: np.abs + shift + tab
###Code
import numpy as np
#np.abs()
x = np.pi
print(x)
print(np.ones((5,2,4)))
print(np.zeros(5))
print(np.eye(5))
print(np.random.normal(0,1))
print(np.random.uniform(0,1))
print(np.random.randint(10))
print(np.array(range(10)))
print(np.random.permutation(range(10)))
x = np.array(x,dtype='float32')
print(x,type(x),x.dtype)
x = np.array(3.4,dtype='int64')
print(x,type(x),x.dtype)
X = np.array([[1,2,3],[4,5,6]])
print(X)
print(X.shape)
print(X.shape[0])
print(X.shape[1])
print(X)
print(X[0,2])
print(X[0,:])
print(X[0,0:2])
print(X[0,:2])
print(X[0,1:3])
print(X[-1,-1])
X[0,:] = [7,8,9] # assignment
print(X)
X = np.array([[1,2,3],[4,5,6]])
X = np.append(X,[[7,8,9]],axis=0) # append
print(X)
print(X.shape)
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
(3, 3)
###Markdown
3. Load and save data
###Code
pwd
ls -al
cd ..
pwd
data = np.loadtxt('data/profit_population.txt', delimiter=',')
print(data)
print(data.shape)
print(data.dtype)
new_data = 2* data
np.savetxt('data/profit_population_new.txt', new_data, delimiter=',', fmt='%2.5f')
%whos
###Output
Variable Type Data/Info
-------------------------------
data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
new_data ndarray 97x2: 194 elems, type `float64`, 1552 bytes
np module <module 'numpy' from '/Us<...>kages/numpy/__init__.py'>
###Markdown
4. Linear algebra operations
###Code
X = np.array([[1,2,3],[4,5,6]])
print(X,X.shape)
Y = np.array([[2,7,-2],[1,8,3]])
print(Y,Y.shape)
Z = Y.T #transpose
print(Z,Z.shape)
Z = X* Y # element-wise matrix multiplication
print(Z,Z.shape)
Z = X.dot(Y.T) # matrix multiplication
print(Z,Z.shape)
Z = X**2
print(Z,Z.shape)
Z = 1/X
print(Z,Z.shape)
Z = np.log(X)
print(Z,Z.shape)
Z = X + 1
print(Z,Z.shape)
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.max(X))
print(np.max(X,axis=0))
print(np.max(X,axis=1))
print(np.argmax(X))
print(np.argmax(X,axis=0))
print(np.argmax(X,axis=1))
X = np.array([[2,7,-2],[1,8,3]])
print(X,X.shape)
print(np.sum(X))
print(np.sum(X,axis=0))
print(np.sum(X,axis=1))
###Output
[[ 2 7 -2]
[ 1 8 3]] (2, 3)
19
[ 3 15 1]
[ 7 12]
###Markdown
5. Plotting data
###Code
# Visualization library
%matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png2x','pdf')
import matplotlib.pyplot as plt
x = np.linspace(0,6*np.pi,100)
#print(x)
y = np.sin(x)
plt.figure(1)
plt.plot(x, y,label='sin'.format(i=1))
plt.legend(loc='best')
plt.title('Sin plotting')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
###Output
_____no_output_____
###Markdown
6. Control statements 6.1 Function
###Code
def f(x):
return x**2
x = 3
y = f(x)
print(y)
def g(x,y):
return x**2, y**3
x = 3
y = 5
u,v = g(x,y)
print(u,v)
###Output
9 125
###Markdown
6.2 Logical control statements
###Code
for i in range(10):
print(i)
i = 0
while (i<10):
i += 1
print(i)
i = True
if i==True:
print(True)
i = False
if i==True:
print(True)
elif i==False:
print(False)
###Output
True
False
###Markdown
7. Vectorization and efficient linear algebra computations 7.1 No vectorization
###Code
import time
n = 10**7
x = np.linspace(0,1,n)
y = np.linspace(0,2,n)
start = time.time()
z = 0
for i in range(len(x)):
z += x[i]*y[i]
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666667.0
Time= 4.109534978866577
###Markdown
7.2 Vectorization
###Code
start = time.time()
z = x.T.dot(y)
end = time.time() - start
print(z)
print('Time=',end)
###Output
6666667.0
Time= 0.011171817779541016
|
notebooks/08_Funcoes.ipynb | ###Markdown
[View in Colaboratory](https://colab.research.google.com/github/ufrpe-eagri-ic/aulas/blob/master/08_Funcoes.ipynb) Functions Functions let us write code that we can reuse later. When we group a series of instructions into a function, we can reuse it to take inputs, perform calculations or other manipulations, and return outputs, just like a function in mathematics. It is essentially a reusable block of code. How to create a function:```def nome_da_funcao(argumento01, argumento02, ...): # function body return alguma_coisa```The **def** keyword declares that we are building a function, with the function's name and the arguments that will be passed to it. The **return** statement is optional: if your function does not return anything, it can simply print something instead. Functions can return several values at once as a **tuple**, or as individual values if they are assigned to the same number of variables as were returned. Functions must be defined before they are called. Using the function:```alguma_coisa = nome_da_funcao(argumento01, argumento02)```Naming: do not reuse a function's name as a variable name.**Example** A function called **maior_numero** that takes two numbers and prints the larger one.
###Code
def maior_numero(num1, num2):
maior = num1
if num2 > num1:
maior = num2
print('The number', maior, 'is the larger one')
###Output
_____no_output_____
###Markdown
To use the function:
###Code
maior_numero(2, 10)
# use it as many times as you like
maior_numero(10,4)
maior_numero(20, 16)
###Output
_____no_output_____
###Markdown
**Exercise** Write a function that takes a list and prints only the even numbers.
###Code
###Output
_____no_output_____
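###Markdown
One possible solution, as a sketch (the name `imprime_pares` is just an illustration, not prescribed by the exercise):
```
def imprime_pares(lista):
    for n in lista:
        if n % 2 == 0:
            print(n)

imprime_pares([1, 2, 3, 4, 5, 6])   # prints 2, 4, 6
```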
###Markdown
Returning values To return a value, use the **return** keyword. Example: a function that returns True if the given number is odd and False otherwise.
###Code
def eh_impar(x):
if x%2 == 0:
return False
else:
return True
print(eh_impar(4))
print(eh_impar(1))
print(eh_impar(3))
###Output
False
True
True
###Markdown
Returning more than one value
###Code
# Define a function that applies two math operations and returns both results
def soma_multiplica(number1, number2):
return number1+number2, number1*number2
print(soma_multiplica(3, 4))
print(soma_multiplica(2, 5))
###Output
_____no_output_____
###Markdown
Exercise Write a Python function called 'histograma' that takes a string as input and returns a dictionary whose keys are the letters of the input string and whose values are the number of times each letter appears in the string. For example:```>>> h = histograma('brontosaurus')>>> h{'a': 1, 'b': 1, 'o': 2, 'n': 1, 's': 2, 'r': 2, 'u': 2, 't': 1}```
###Code
###Output
_____no_output_____ |
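###Markdown
One possible solution for the histogram exercise, as a sketch:
```
def histograma(texto):
    h = {}
    for letra in texto:
        h[letra] = h.get(letra, 0) + 1
    return h

print(histograma('brontosaurus'))
```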
2019/Day 02.ipynb | ###Markdown
Day 2 - Intcode interpreter* https://adventofcode.com/2019/day/2We have a computer again! We've seen this before in 2017 ([day 18](../2017/Day%2018.ipynb), [day 23](../2017/Day%2023.ipynb)), and 2018 ([day 16](../2018/Day%2016.ipynb), [day 19](../2018/Day%2019.ipynb) and [day 21](../2018/Day%2021.ipynb)).Now we have opcodes with a variable number of operands (called *positions* here); `1` and `2` each have 2 operands and an output destination, 99 has none. There are also no registers, all operations take place directly on the memory where our code is also stored, so it can self-modify. Fun!So we need a CPU with a position counter, memory, and opcode definitions (*instructions*) to call, and the opcodes need access to the memory (to read operand values from and write their result to). Easy peasy.I'm assuming we'll expand on the instruction set later on, and that we might have instructions with different numbers of operands. So given a function to process the input values and the number of *parameters* to process, we should be able to produce something readable and reusable.
###Code
import aocd
data = aocd.get_data(day=2, year=2019)
memory = list(map(int, data.split(',')))
from __future__ import annotations
import operator
from dataclasses import dataclass
from typing import Callable, List, Mapping, Optional
Memory = List[int]
class Halt(Exception):
"""Signal to end the program"""
@classmethod
def halt(cls) -> int: # yes, because Opcode.f callables always produce ints, right?
raise cls
@dataclass
class Instruction:
# the inputs are processed by a function that operates on integers
# returns integers to store in a destination position
f: Callable[..., int]
# An opcode takes N parameters
paramater_count: int
def __call__(self, memory: Memory, *parameters: int) -> None:
if parameters:
*inputs, output = parameters
memory[output] = self.f(*(memory[addr] for addr in inputs))
else:
# no parameter count, so just call the function directly, no output expected
self.f()
class CPU:
memory: Memory
pos: int
opcodes: Mapping[int, Instruction] = {
1: Instruction(operator.add, 3),
2: Instruction(operator.mul, 3),
99: Instruction(Halt.halt, 0),
}
def reset(self, memory: Memory = None):
if memory is None:
memory = []
self.memory = memory[:]
self.pos: int = 0
def execute(
self, memory: Memory,
noun: Optional[int] = None,
verb: Optional[int] = None
) -> int:
self.reset(memory)
memory = self.memory
if noun is not None:
memory[1] = noun
if verb is not None:
memory[2] = verb
try:
while True:
op = self.opcodes[memory[self.pos]]
                paramcount = op.parameter_count
parameters = memory[self.pos + 1 : self.pos + 1 + paramcount]
op(memory, *parameters)
self.pos += 1 + paramcount
except Halt:
return memory[0]
test: Memory = [1, 9, 10, 3, 2, 3, 11, 0, 99, 30, 40, 50]
cpu = CPU()
assert cpu.execute(test) == 3500
print('Part 1:', cpu.execute(memory, 12, 2))
###Output
Part 1: 3706713
###Markdown
Part 2 Now we need to find the noun and verb that produce a specific programme output. The text suggests we should just brute-force this, so let's try that first and see how long that takes. Given that we'll only have to search through 10000 different inputs, and there are no options to loop, that's not that big a search space anyway. While the code can self-modify, this is limited to: - altering what inputs are read - where to write the output - replacing a read or write op with another read, write or halt op. So we execute, at most, `len(memory) // 4` instructions per run, which for my input means there are only 32 steps per execution run, and so in total we are going to execute, at most, 320,000 instructions. That's pretty cheap:
###Code
from itertools import product
def bruteforce(target: int, memory: Memory) -> int:
cpu = CPU()
for noun, verb in product(range(100), repeat=2):
result = cpu.execute(memory, noun, verb)
if result == target:
break
return 100 * noun + verb
print('Part 2:', bruteforce(19690720, memory))
###Output
Part 2: 8609
###Markdown
Avoiding brute force Can we just calculate the number? We'd have to disassemble the inputs to see what is going on. Provided the programme never alters its own instructions, we should be able to figure this out. Let's see if we need to worry about self-modifying code first:
###Code
# code is expected to alter memory[0], so we don't count that
# as self modifying as the CPU will never return there.
if any(target and target % 4 == 0 for target in memory[3::4]):
print('Code targets opcodes')
elif any(target % 4 and target > (i * 4 + 3) for i, target in enumerate(memory[3::4])):
print('Code targets parameters of later opcodes')
else:
print('Code is not self-modifying')
###Output
Code is not self-modifying
###Markdown
For my puzzle input, the above declares the code to not be self-modifying. So all we have is addition and multiplication of memory already harvested for opcodes and parameter addresses. It's just a big sum! Note that some operations might write to a destination address that is then never read from, or overwritten by other operations. We could just eliminate those steps, if we could detect those cases. What does the sum look like? We can skip the first operation (`ADD 1 2 3`) because the *next* expression also writes to `3` without using the outcome of the first. That makes sense, because `1` and `2` are our `noun` and `verb` inputs and those can be anywhere in the programme. Or, like I do below, you can just skip the type error that `listobject[string]` throws when trying to use either `'noun'` or `'verb'` as indices.
###Code
fmemory = memory[:]
fmemory[1:3] = 'noun', 'verb'
for opcode, a, b, target in zip(*([iter(fmemory)] * 4)):
if opcode == 99:
break
try:
fmemory[target] = f"({fmemory[a]}{' +*'[opcode]}{fmemory[b]})"
except TypeError as e:
# the first instruction is to add memory[noun] and memory[verb]
# and store in 3 but the next instruction also stores in 3,
# ignoring the previous result.
assert a == 'noun' and b == 'verb'
formula = fmemory[0]
print(formula)
###Output
(3+((1+(3*(1+((3+(3+(5*(2*(4+((5*(1+((5*(1+(2*((4*((((2+(5+(2+(noun*4))))+4)+2)+5))+2))))*3)))+1))))))*3))))+verb))
###Markdown
If you were to compile this to a function, Python's AST optimizer would actually replace a lot of the constants. I'm using [Astor](https://github.com/berkerpeksag/astor/) here to simplify roundtripping and pretty printing, so we can see what Python makes of it:
###Code
import ast
import astor
from textwrap import wrap
simplified = astor.to_source(ast.parse(formula))
print("19690720 =", simplified)
###Output
19690720 = 3 + (1 + 3 * (1 + (3 + (3 + 5 * (2 * (4 + (5 * (1 + 5 * (1 + 2 * (4 * (2 +
(5 + (2 + noun * 4)) + 4 + 2 + 5) + 2)) * 3) + 1))))) * 3) + verb)
###Markdown
This is something we can work with! Clearly this is a simple [linear Diophantine equation](https://en.wikipedia.org/wiki/Diophantine_equation#Linear_Diophantine_equations) that can be solved for either `noun` or `verb`, so let's see if [sympy](https://docs.sympy.org/latest/), the Python symbolic maths solver, can do something with this. We know that both `noun` and `verb` are values in the range `[0, 100)`, so we can use this to see what inputs in that range produce an output in that range:
###Code
import dis
from IPython.display import display, Markdown
from sympy import diophantine, lambdify, symbols, sympify, Eq, Symbol
from sympy.solvers import solve
# ask Sympy to parse our formula; it'll simplify the formula for us
display(Markdown("### Simplified expression:"))
expr = sympify(formula) - 19690720
display(expr)
# extract the symbols
noun, verb = sorted(expr.free_symbols, key=lambda s: s.name)
display(Markdown("### Solution for the linear diophantine equation"))
# solutions for the two input variables, listed in alphabetical order,
for noun_expr, verb_expr in diophantine(expr):
if isinstance(noun_expr, Symbol):
solution = verb_expr.subs(noun_expr, noun)
arg, result = noun, verb
else:
solution = noun_expr.subs(verb_expr, verb)
arg, result = verb, noun.name
display(Eq(result, solution))
for i in range(100):
other = solution.subs(arg, i)
if 0 <= other < 100:
noun_value = other if result.name == 'noun' else i
verb_value = i if result.name == 'noun' else other
display(Markdown(
f"""### Solution found:
* $noun = {noun_value}$
* $verb = {verb_value}$
* $100noun + verb = {100 * noun_value + verb_value}$
"""))
break
###Output
_____no_output_____
###Markdown
Unfortunately, even Sympy's `solveset()` function couldn't help me eliminate the loop over `range(100)`; in principle this should be possible using a `Range(100)` set, but `solveset()` just isn't quite there yet. A [related question on Stack Overflow](https://stackoverflow.com/questions/46013884/get-all-positive-integral-solutions-for-a-linear-equation) appears to confirm that using a loop is the correct method here. I could give Sage a try for this, perhaps. That said, if you look at the $term1 - term2 \times arg$ solution to the diophantine equation, to me it is clear that `noun` and `verb` are simply the quotient and remainder, respectively, of dividing $term1$ by $term2$:
###Code
from sympy import postorder_traversal, Integer
term1, term2 = (abs(int(e)) for e in postorder_traversal(expr) if isinstance(e, Integer))
print(f"divmod({term1}, {term2})")
noun, verb = divmod(term1, term2)
print(f"{noun=}, {verb=}, {100 * noun + verb=}")
###Output
divmod(18576009, 216000)
noun=86, verb=9, 100 * noun + verb=8609
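###Markdown
As a quick sanity check (a sketch, using only the constants printed above): the divmod trick works when the simplified expression has the linear form $term2 \times noun + verb - term1 = 0$ with $0 \le verb < term2$, so the recovered values should satisfy $216000 \times noun + verb = 18576009$.
###Code
# sanity check on the printed constants; assumes the linear form described above
noun, verb = divmod(18576009, 216000)
assert 216000 * noun + verb == 18576009
assert 0 <= verb < 100
print(noun, verb, 100 * noun + verb)
###Output
_____no_output_____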
examples/TrackAnalysis/BTrackMateVisualization.ipynb | ###Markdown
Visualize Dividing tracks
###Code
TM.TrackMateLiveTracks(RawImage, LabelImage, Mask,savedir, calibration,all_track_properties, True)
###Output
_____no_output_____
###Markdown
Visualize Non Dividing tracks
###Code
TM.TrackMateLiveTracks(RawImage, LabelImage, Mask,savedir, calibration, all_track_properties, False)
###Output
_____no_output_____ |
week2/2_Conditions_part2.ipynb | ###Markdown
תנאים – חלק 2 תנאים – תזכורת ניזכר במחברת הקודמת, שבה למדנו על תנאים.למדנו שבעזרת מילת המפתח if אנחנו יכולים לבקש מהקוד שלנו לבצע סדרת פעולות, רק אם תנאי כלשהו מתקיים.במילים אחרות: אנחנו יכולים לבקש מקטע קוד לרוץ, רק אם ביטוי בוליאני מסוים שווה ל־True. נראה דוגמה:
###Code
shoes_in_my_drawer = int(input("How many shoes do you have in your drawer? "))
if shoes_in_my_drawer % 2 == 1:
print("You have an odd number of shoes. Something is wrong!")
###Output
_____no_output_____
###Markdown
בקוד שלמעלה, ביקשנו מהמשתמש להזין את מספר הנעליים שיש לו, ואם המספר היה אי־זוגי הדפסנו לו שמשהו מוזר מתרחש.אבל מה קורה אם נרצה להדפיס למשתמש הודעת אישור כשאנחנו מגלים שהכול בסדר? כיצד הייתם משפרים את התוכנית שלמעלה כך שתדפיס למשתמש הודעת אישור שהכול בסדר? השתמשו בכלים שרכשתם במחברת הקודמת. נסו לחשוב על לפחות 2 דרכים דומות. חשוב! פתרו לפני שתמשיכו! מה אם לא?על מילת המפתח else דרך אחת לפתור את התרגיל שהופיע למעלה, היא זו:
###Code
shoes_in_my_drawer = int(input("How many shoes do you have in your drawer? "))
if shoes_in_my_drawer % 2 == 1:
print("You have an odd number of shoes. Something is wrong!")
if shoes_in_my_drawer % 2 != 1:
print("You have an even number of shoes. Congratulations!")
###Output
_____no_output_____
###Markdown
כדי להדפיס למשתמש הודעה מתאימה בכל מצב, הוספנו תנאי הפוך מהתנאי הראשון, שידפיס למשתמש הודעה מתאימה.אחד הדברים המעניינים בתוכנית שלמעלה, הוא שיש לנו שני תנאים שמנוגדים זה לזה במשמעות שלהם. אחד בודק זוגיות, והשני בודק אי־זוגיות.למעשה, אם היינו רוצים לנסח את הקוד במילים, היינו יכולים להגיד: אם מספר הנעליים הוא אי־זוגי, הדפס שיש בעיה. אחרת, הדפס שהכול בסדר. פייתון מקנה לנו כלי נוח לבטא את ה"אחרת" הזה, כלי שמקל על הקריאוּת של הקוד – מילת המפתח else. נראה איך אפשר לנסח את הקוד שלמעלה בעזרת else:
###Code
shoes_in_my_drawer = int(input("How many shoes do you have in your drawer? "))
if shoes_in_my_drawer % 2 == 1:
print("You have an odd number of shoes. Something is wrong!")
else:
print("You have an even number of shoes. Congratulations!")
###Output
_____no_output_____
###Markdown
שימו לב לצורת ההזחה: ה־else אינו מוזח, אך התוכן שבתוכו כן.נזכור גם ש־else קשור ל־if שלפניו, ומדבר על המקרה המשלים לתנאי שנמצא באותו if.דרך נוספת לחשוב על else היא שקטע הקוד בתוך ה־else יתבצע אם תוצאתו של הביטוי הבוליאני שמופיע כתנאי של ה־if היא False. דוגמאות לתנאים עם elseבדוגמאות הבאות, התנאי מופיע כך, והפעולות שיקרו בעקבותיו מופיעות כך.אם השעה היא לפני 20:00, שמור על תאורה גבוהה במסעדה. אחרת, עמעם את התאורה.אם הגיל של המשתמש הוא לפחות 18, אפשר לו להיכנס לבר. אחרת, הצע לו אטרקציות אחרות לבקר בהן וגם אל תכניס אותו.אם המשתמש בכספומט הזין סכום שלילי, או יותר מהסכום הזמין בחשבון שלו, הצג לו הודעת שגיאה. אחרת, הפחת לו את הסכום מהחשבון, הוצא לו שטרות בסכום שהזין.אם הדוד כבוי וגם השעה היא לפני 8:00 וגם השעה היא אחרי 7:00, הדלק את הדוד. אחרת, אם הדוד דלוק – כבה את הדוד. זרימת התוכנית: ציור לדוגמה מימוש לדוגמה התוכנית המוצגת פה מעט מורכבת יותר מהשרטוט המופיע למעלה, כדי לתת לה נופך מציאותי יותר.
###Code
is_boiler_on = input("Is your boiler turned on? [yes/no] ")
hour = int(input("Enter the hour (00-23): "))
minute = int(input("Enter the minute (00-59): "))
if is_boiler_on == 'yes':
is_boiler_on = True
else:
is_boiler_on = False
if not is_boiler_on and hour == 7 and minute > 0:
is_boiler_on = True
else:
if is_boiler_on:
is_boiler_on = False
if is_boiler_on:
boiler_status = "on"
else:
boiler_status = "off"
print("Boiler is " + boiler_status + " right now.")
###Output
_____no_output_____
###Markdown
מה המצב?טיפול במצבים מרובים בארצות הברית מדורגת מכירת אלבומים כך: אלבום מוזיקה נחשב "אלבום כסף" אם נמכרו ממנו לפחות 100,000 עותקים. אלבום מוזיקה נחשב "אלבום זהב" אם נמכרו ממנו לפחות 500,000 עותקים. אלבום מוזיקה נחשב "אלבום פלטינה" אם נמכרו ממנו לפחות 1,000,000 עותקים. אלבום מוזיקה נחשב "אלבום יהלום" אם נמכרו ממנו לפחות 10,000,000 עותקים. קלטו את כמות העותקים שנמכרו עבור אלבום המטאל המצליח "רוצח או נזלת", והדפיסו את דירוג האלבום. לדוגמה, אם המשתמש הכניס שמספר המכירות היה 520,196, הדפיסו "אלבום זהב". אם האלבום לא נמכר מספיק כדי לזכות בדירוג, הדפיסו "האלבום אינו רב־מכר". חשוב! פתרו לפני שתמשיכו! פתרון ושיפורים בתרגיל שנתבקשתם לפתור, נוצר מצב שבו יש צורך בתנאים מרובים שמשלימים זה את זה.נציג לפניכם שני פתרונות אפשריים:
###Code
copies_sold = int(input("How many copies were sold? "))
# אנחנו משתמשים באותיות גדולות עבור שמות המשתנים האלו.
# זו מוסכמה בין מתכנתים שמבקשת להדגיש שערכי המשתנים הם קבועים, ולא הולכים להשתנות במהלך התוכנית.
SILVER_ALBUM = 100000
GOLD_ALBUM = 500000
PLATINUM_ALBUM = 1000000
DIAMOND_ALBUM = 10000000
if copies_sold >= DIAMOND_ALBUM:
print("Diamond album")
else:
if copies_sold >= PLATINUM_ALBUM:
print("Platinum album")
else:
if copies_sold >= GOLD_ALBUM:
print("Gold album")
else:
if copies_sold >= SILVER_ALBUM:
print("Silver album")
else:
print("Your album is not a best-seller")
###Output
_____no_output_____
###Markdown
ודאי שמתם לב שהקוד נראה מעט מסורבל בגלל כמות ההזחות, והוא יוסיף ויסתרבל ככל שנוסיף יותר מקרים אפשריים.ננסה לפתור את זה בעזרת הגדרת טווחים מדויקים עבור כל דירוג.הקוד המשופץ ייראה כך:
###Code
copies_sold = int(input("How many copies were sold? "))
SILVER_ALBUM = 100000
GOLD_ALBUM = 500000
PLATINUM_ALBUM = 1000000
DIAMOND_ALBUM = 10000000
if copies_sold >= DIAMOND_ALBUM:
print("Diamond album")
if copies_sold >= PLATINUM_ALBUM and copies_sold < DIAMOND_ALBUM:
print("Platinum album")
if copies_sold >= GOLD_ALBUM and copies_sold < PLATINUM_ALBUM:
print("Gold album")
if copies_sold >= SILVER_ALBUM and copies_sold < GOLD_ALBUM:
print("Silver album")
if copies_sold < SILVER_ALBUM:
print("Your album is not a best-seller")
###Output
Diamond album
###Markdown
הקוד נראה טוב יותר במידה ניכרת, אבל בכל if אנחנו בודקים שהתנאי שלפניו לא התקיים, וזה מסורבל מאוד.אנחנו עושים את זה כדי למנוע הדפסה כפולה: אומנם כל אלבום זהב נמכר מספיק פעמים כדי להיות מוכתר כאלבום כסף, אבל לא נרצה להדפיס למשתמש את שתי ההודעות. מה היה קורה אם לא היו תנאים אחרי האופרטור הלוגי and? מחקו אותם, הכניסו לתוכנה כקלט 10000000 ובדקו מה התוצאה. חשוב! פתרו לפני שתמשיכו! אם אחרת – elif כדי לפתור את הבעיה שהוצגה למעלה, פייתון מעניקה לנו כלי נוסף שנקרא elif.מדובר בסך הכול בקיצור של else... if (תנאי), או בעברית: אם התנאי הקודם לא התקיים, בדוק אם...ראו, לדוגמה, איך נשפר את הקוד הקודם לקוד קריא יותר במידה רבה:
###Code
copies_sold = int(input("How many copies were sold? "))
SILVER_ALBUM = 100000
GOLD_ALBUM = 500000
PLATINUM_ALBUM = 1000000
DIAMOND_ALBUM = 10000000
if copies_sold >= DIAMOND_ALBUM:
print("Diamond album")
elif copies_sold >= PLATINUM_ALBUM:
print("Platinum album")
elif copies_sold >= GOLD_ALBUM:
print("Gold album")
elif copies_sold >= SILVER_ALBUM:
print("Silver album")
else:
print("Your album is not a best-seller")
###Output
_____no_output_____
###Markdown
מה קורה כאן? הטריק הוא שבשורה שבה כתוב elif, פייתון תנסה לבדוק את התנאי רק אם התנאים שלפניו לא התקיימו. במילים אחרות – ערכיהם של הביטויים הבוליאניים בכל התנאים שלפניו היו False. בכל שורה שבה יש if או elif, פייתון בודקת האם הביטוי הבוליאני שבאותה שורה מתקיים, ואז: אם כן, היא מבצעת את הפעולות המוזחות מתחת לתנאי ומפסיקה לבדוק את התנאים הבאים. אם לא, היא עוברת לבדוק את התנאי ב־elif־ים הבאים (אם ישנם elif־ים). אם אף אחד מהתנאים לא מתקיים, יתבצע קטע הקוד ששייך ל־else (אם יש else). ניתן לכתוב if בלי else ובלי elif־ים אחריו. if תמיד יהיה ראשון, ואם יש צורך להוסיף מקרים, else תמיד יהיה אחרון, וביניהם יהיו elif־ים. תרגול כניסה לבנק, שלב 2 שם המשתמש שלי לבנק הוא wrong, והסיסמה שלי היא ads sports.שם המשתמש של מנהל הבנק היא admin, והסיסמה שלו היא is trator.קבלו מהמשתמש שם משתמש וסיסמה, והדפיסו לו הודעה שמספרת לו לאיזה משתמש הוא הצליח להתחבר.אם לא הצליח להתחבר, הדפיסו לו הודעת שגיאה מתאימה. חשבון למתחילים, שלב 1 דני רוצה ללמוד חשבון, ולצורך כך הוא צריך מחשבון פשוט שיעזור לו.כתבו תוכנה שמקבלת 2 מספרים ופעולה חשבונית (+, -, *, / או **), ויודעת להחזיר את התשובה הנכונה לתרגיל.לדוגמה: עבור מספר ראשון 5, מספר שני 2 ופעולה / התוכנית תדפיס 2.5, כיוון ש־5/2 == 2.5. עבור מספר ראשון 9, מספר שני 2 ופעולה ** התוכנית תדפיס 81, כיוון ש־92 == 81. עבור מספר ראשון 3, מספר שני 7 ופעולה - התוכנית תדפיס -4, כיוון ש־3-7 == -4.
###Code
num1 = int(input("First number:"))
action = input("Action:")
num2 = int(input("Second number:"))
if action == '+':
print(num1 + num2)
elif action == '-':
print(num1 - num2)
elif action == '*':
print(num1 * num2)
elif action == '/':
print(num1 / num2)
elif action == '**':
print(num1 ** num2)
###Output
8
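###Markdown
סקיצה אחת אפשרית גם לתרגיל "כניסה לבנק, שלב 2" (ההודעות למשתמש הן הצעה בלבד):
###Code
username = input("Username: ")
password = input("Password: ")
if username == "wrong" and password == "ads sports":
    print("Logged in as wrong")
elif username == "admin" and password == "is trator":
    print("Logged in as admin")
else:
    print("Wrong username or password")
###Output
_____no_output_____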
###Markdown
מחשבון מס הכנסה המיסוי על הכנסה בישראל גבוה מאוד ושיעורו נקבע לפי מדרגות כדלקמן: מי שמרוויח עד 6,310 ש"ח, משלם מס בשיעור 10% על הסכום הזה. מי שמרוויח עד 9,050 ש"ח, משלם מס בשיעור 14% על הסכום הזה. מי שמרוויח עד 14,530 ש"ח, משלם מס בשיעור 20% על הסכום הזה. מי שמרוויח עד 20,200 ש"ח, משלם מס בשיעור 31% על הסכום הזה. מי שמרוויח עד 42,030 ש"ח, משלם מס בשיעור 35% על הסכום הזה. מי שמרוויח עד 54,130 ש"ח, משלם מס בשיעור 47% על הסכום הזה. מי שמרוויח מעל הסכום האחרון, משלם מס בשיעור 50% על הסכום הזה. הסכום תמיד משולם על ההפרש בין המדרגות. לדוגמה, אם הרווחתי בחודש מסוים 10,000 ש"ח, תשלום המיסים שלי יחושב כך: על 6,310 השקלים הראשונים אשלם 631 ש"ח, שהם 10% מאותה מדרגה. על הסכום העודף עד 9,050 שקלים, שהוא 2,740 שקלים (9,050 - 6,310) אשלם 383.6 שקלים, שהם 14% מ־2,740 שקלים. על הסכום העודף בסך 950 שקלים (10,000 - 9,050) אשלם 190 ש"ח, שהם 20% מ־950 שקלים. בסך הכול, אשלם למס הכנסה באותו חודש 631 + 383.6 + 190 ש"ח, שהם 1,204.6 שקלים. כתבו קוד למחשבון שמקבל את השכר החודשי שלכם ומדפיס את סכום המס שתשלמו. ירוץ אם נתקן, אחרת... בקוד הבא נפלו שגיאות רבות. תקנו אותו והריצו אותו כך שיעבוד ושידפיס הודעה אחת בלבד עבור כל מצב.
###Code
age = int(input("Please enter your age: "))
if age < 0:
print("Your age is invalid.")
elif age < 18:
print("Younger than 18.")
else :
print("You are so old!")
###Output
You are so old!
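###Markdown
סקיצה אחת אפשרית לתרגיל "מחשבון מס הכנסה", בעזרת if/elif בלבד (שמות המשתנים והקבועים הם הצעה בלבד):
###Code
salary = int(input("Monthly salary: "))
# גבולות המדרגות לפי התרגיל
LEVEL1 = 6310
LEVEL2 = 9050
LEVEL3 = 14530
LEVEL4 = 20200
LEVEL5 = 42030
LEVEL6 = 54130
# המס המצטבר בסוף כל מדרגה
TAX1 = LEVEL1 * 0.10
TAX2 = TAX1 + (LEVEL2 - LEVEL1) * 0.14
TAX3 = TAX2 + (LEVEL3 - LEVEL2) * 0.20
TAX4 = TAX3 + (LEVEL4 - LEVEL3) * 0.31
TAX5 = TAX4 + (LEVEL5 - LEVEL4) * 0.35
TAX6 = TAX5 + (LEVEL6 - LEVEL5) * 0.47
if salary <= LEVEL1:
    tax = salary * 0.10
elif salary <= LEVEL2:
    tax = TAX1 + (salary - LEVEL1) * 0.14
elif salary <= LEVEL3:
    tax = TAX2 + (salary - LEVEL2) * 0.20
elif salary <= LEVEL4:
    tax = TAX3 + (salary - LEVEL3) * 0.31
elif salary <= LEVEL5:
    tax = TAX4 + (salary - LEVEL4) * 0.35
elif salary <= LEVEL6:
    tax = TAX5 + (salary - LEVEL5) * 0.47
else:
    tax = TAX6 + (salary - LEVEL6) * 0.50
# עבור שכר של 10,000 ש"ח התוצאה היא 1204.6, כמו בדוגמה שבתרגיל
print(tax)
###Output
_____no_output_____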
web_programming/quiz/quiz.ipynb | ###Markdown
아래 URL의 NBA 데이터를 크롤링하여 판다스 데이터 프레임으로 나타내세요. - http://stats.nba.com/teams/traditional/?sort=GP&dir=-1
###Code
from selenium import webdriver
import pandas as pd  # 아래에서 DataFrame을 만들 때 필요
url = "http://stats.nba.com/teams/traditional/?sort=GP&dir=-1"
driver = webdriver.Chrome()
driver.get(url)
columns = driver.find_elements_by_css_selector("body > main > div.stats-container__inner > div > div.row > div > div > nba-stat-table > div.nba-stat-table > div.nba-stat-table__overflow > table > thead > tr > th")
rows = driver.find_elements_by_css_selector("body > main > div.stats-container__inner > div > div.row > div > div > nba-stat-table > div.nba-stat-table > div.nba-stat-table__overflow > table > tbody > tr")
cols = []
for column in columns[1:]:
cols.append(column.text)
cols = list(filter("".__ne__,cols))
print(len(cols), cols)
dic = {}
row_count = 1
for row in rows:
datas = []
for i in range(2,len(cols)+2):
data = row.find_element_by_css_selector("td:nth-child("+str(i)+")").text
datas.append(data)
dic[row_count] = datas
row_count += 1
len(dic[1]), len(cols)
for row in rows:
print(row.find_element_by_css_selector("td:nth-child(28)").text)
dic
df = pd.DataFrame(columns=cols)
for i in range(1,len(dic)+1):
df.loc[len(df)] = dic[i]
df = df.sort_values(by="WIN%",ascending=False)
df.reset_index(drop=True, inplace=True)
df
driver.quit()
###Output
_____no_output_____
###Markdown
셀레니움을 이용하여 네이버 IT/과학 기사의 10 페이지 까지의 최신 제목 리스트를 크롤링하세요.- http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105
###Code
from selenium import webdriver
url = "http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105"
driver = webdriver.Chrome()
driver.get(url)
# section파트만 가져온다.
section = driver.find_element_by_css_selector("#section_body")
# 기사 제목 가져오기(첫 페이지 테스트)
# titles = driver.find_elements_by_css_selector("ul > li > dl > dt:nth-child(2)")
titles = section.find_elements_by_css_selector("ul > li > dl > dt:not(.photo)")
len(titles)
# 기사 제목 가져오기(첫 페이지 테스트)
ls = []
for title in titles:
ls.append(title.text)
ls
# 1~10페이지 기사 가져오기
import time
pages = 10
page_count = 1
dic = {}
for page in range(1,pages+1):
ls = []
section = driver.find_element_by_css_selector("#section_body")
titles = section.find_elements_by_css_selector("ul > li > dl > dt:not(.photo)")
# print(titles)
for title in titles:
ls.append(title.text)
print(page_count)
dic[page_count] = ls
page_count += 1
driver.find_element_by_css_selector("#paging > a:nth-child(" + str(page_count) + ")").click()
time.sleep(2)
driver.quit()
###Output
_____no_output_____ |
3.0/Project_3.0.ipynb | ###Markdown
Predicting Dengue Muhammad Fuzail Zubari 18101135
###Code
import pandas as pd
import numpy as np
import collections
import seaborn as sns
import collections
import matplotlib.cm as cm
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
from statsmodels.tools import eval_measures
import statsmodels.formula.api as smf
import statsmodels.api as sm
from collections import Counter
from datetime import datetime, date, timedelta
from IPython.display import Image
%matplotlib inline
# Loading Data
dengue_features_train = pd.read_csv('dengue_features_train.csv')
dengue_features_test = pd.read_csv('dengue_features_test.csv')
dengue_labels_train = pd.read_csv('dengue_labels_train.csv')
dengue_features_train.head()
dengue_features_test.head()
###Output
_____no_output_____
###Markdown
Data Preprocessing Test Data
###Code
dengue_features_test.isnull().sum()
#Check duplicate rows
np.sum(dengue_features_test.duplicated())
dengue_features_test.fillna(method='ffill', inplace=True)
dengue_features_test.head()
dengue_features_test.isnull().sum()
###Output
_____no_output_____
###Markdown
Data Preprocessing Train Data
###Code
dengue_features_train = pd.merge(dengue_labels_train, dengue_features_train, on=['city','year','weekofyear'])
dengue_features_train.isnull().sum()
# Check for duplicated values
np.sum(dengue_features_train.duplicated())
# Forward or backward 'NaN' data filling
dengue_features_train.fillna(method='ffill', inplace=True)
dengue_features_train.isnull().sum()
###Output
_____no_output_____
###Markdown
EDA
###Code
# Calculating the correlation
correlations = dengue_features_train.corr()
cmap = sns.diverging_palette(5, 250, as_cmap=True)
def magnify():
return [dict(selector="th",
props=[("font-size", "7pt")]),
dict(selector="td",
props=[('padding', "0em 0em")]),
dict(selector="th:hover",
props=[("font-size", "12pt")]),
dict(selector="tr:hover td:hover",
props=[('max-width', '200px'),
('font-size', '12pt')])
]
correlations.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '10pt'})\
.set_caption("Hover to magify")\
.set_precision(2)\
.set_table_styles(magnify())
# Dropping columns with negative correlation in both
columns_to_drop = ['reanalysis_tdtr_k', 'year', 'station_diur_temp_rng_c', 'ndvi_nw', 'weekofyear', 'ndvi_ne',
'ndvi_se', 'ndvi_sw', 'reanalysis_max_air_temp_k', 'reanalysis_relative_humidity_percent',
'reanalysis_max_air_temp_k']
# Drop the selected weakly correlated columns from both train and test sets.
dengue_features_train.drop(columns_to_drop, axis=1, inplace=True)
dengue_features_test.drop(columns_to_drop, axis=1, inplace=True)
dengue_features_train.count()
###Output
_____no_output_____
###Markdown
Training and Testing of Data
###Code
subtrain = dengue_features_train.head(1000)
subtest = dengue_features_train.tail(dengue_features_train.shape[0] - 1000)
def get_best_model(train, test):
# Step 1: specify the form of the model
model_formula = "total_cases ~ 1 + " \
"reanalysis_specific_humidity_g_per_kg + " \
"reanalysis_dew_point_temp_k + " \
"reanalysis_min_air_temp_k + " \
"station_min_temp_c + " \
"station_max_temp_c + " \
"station_avg_temp_c + " \
"reanalysis_air_temp_k"
grid = 10 ** np.arange(-8, -3, dtype=np.float64)
best_alpha = []
best_score = 1000
# Step 2: Find the best hyper parameter, alpha
for alpha in grid:
model = smf.glm(formula=model_formula,
data=train,
family=sm.families.NegativeBinomial(alpha=alpha))
results = model.fit()
predictions = results.predict(test).astype(int)
score = eval_measures.meanabs(predictions, test.total_cases)
if score < best_score:
best_alpha = alpha
best_score = score
print('Alpha = ', best_alpha)
print('Score = ', best_score)
# Step 3: refit on entire dataset
full_dataset = pd.concat([train, test])
model = smf.glm(formula=model_formula,
data=full_dataset,
family=sm.families.NegativeBinomial(alpha=best_alpha))
fitted_model = model.fit()
return fitted_model
best_model = get_best_model(subtrain, subtest)
predictions = best_model.predict(dengue_features_test).astype(int)
print(predictions)
submission = pd.read_csv("submission_format.csv", index_col=[0, 1, 2])
submission.total_cases = np.concatenate([predictions])
submission.to_csv("predicted_values_3.0.csv")
###Output
_____no_output_____ |
content/en/modules/notebooks/mnist_with_pytorch.ipynb | ###Markdown
MNIST With PyTorch Training Import Libraries
###Code
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from torch import nn
from torch import optim
from time import time
import os
from google.colab import drive
###Output
_____no_output_____
###Markdown
Pre-Process Data Here we download the data using PyTorch data utils and transform the data by using a normalization function. PyTorch provides a data loader abstraction called a `DataLoader` where we can set the batch size and whether to shuffle the data when loading each batch. Each data loader expects a PyTorch Dataset. The Dataset abstraction and DataLoader usage can be found [here](https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe.html).
###Code
# Data transformation function
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# DataSet
train_data_set = datasets.MNIST('drive/My Drive/mnist/data/', download=True, train=True, transform=transform)
validation_data_set = datasets.MNIST('drive/My Drive/mnist/data/', download=True, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=32, shuffle=True)
validation_loader = torch.utils.data.DataLoader(validation_data_set, batch_size=32, shuffle=True)
###Output
_____no_output_____
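###Markdown
As a minimal illustration of the Dataset abstraction mentioned above (a sketch only, not used in the rest of this notebook), a custom dataset just needs `__len__` and `__getitem__`:
###Code
# sketch: a toy in-memory dataset wrapping two tensors (names are illustrative only)
from torch.utils.data import Dataset, DataLoader
class ToyDataset(Dataset):
    def __init__(self, features, labels):
        self.features = features
        self.labels = labels
    def __len__(self):
        return len(self.labels)
    def __getitem__(self, index):
        return self.features[index], self.labels[index]
toy_loader = DataLoader(ToyDataset(torch.randn(8, 4), torch.zeros(8)), batch_size=4, shuffle=True)
###Output
_____no_output_____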
###Markdown
Define Network Here we select an input size that matches the network definition. Data reshaping or layer reshaping must be done so that the input data shape matches the network input shape. We also define a set of hidden unit sizes along with the output layer size. The `output_size` must match the number of labels associated with the classification problem. The hidden unit sizes can be chosen depending on the problem. `nn.Sequential` is one way to create the network. Here we stack a set of linear layers along with a log-softmax layer as the output layer for classification.
###Code
input_size = 784
hidden_sizes = [128, 128, 64, 64]
output_size = 10
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], hidden_sizes[2]),
nn.ReLU(),
nn.Linear(hidden_sizes[2], hidden_sizes[3]),
nn.ReLU(),
nn.Linear(hidden_sizes[3], output_size),
nn.LogSoftmax(dim=1))
print(model)
###Output
Sequential(
(0): Linear(in_features=784, out_features=128, bias=True)
(1): ReLU()
(2): Linear(in_features=128, out_features=128, bias=True)
(3): ReLU()
(4): Linear(in_features=128, out_features=64, bias=True)
(5): ReLU()
(6): Linear(in_features=64, out_features=64, bias=True)
(7): ReLU()
(8): Linear(in_features=64, out_features=10, bias=True)
(9): LogSoftmax(dim=1)
)
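###Markdown
For comparison, the same network could also be written by subclassing `nn.Module`, which is the more common pattern for larger models. This is only a sketch and is not used below (the class name is illustrative):
###Code
# sketch: an equivalent model as an nn.Module subclass
class MnistClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(784, 128), nn.ReLU(),
            nn.Linear(128, 128), nn.ReLU(),
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, 64), nn.ReLU(),
            nn.Linear(64, 10), nn.LogSoftmax(dim=1))
    def forward(self, x):
        return self.layers(x)
###Output
_____no_output_____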
###Markdown
Define Loss Function and OptimizerRead more about [Loss Functions](https://pytorch.org/docs/stable/nn.htmlloss-functions) and [Optimizers](https://pytorch.org/docs/stable/optim.html) supported by PyTorch.
###Code
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
###Output
_____no_output_____
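###Markdown
Since the network ends in a `LogSoftmax` layer, `NLLLoss` is the matching criterion. An equivalent alternative (shown only as a sketch) is to drop the final `LogSoftmax` layer and use `nn.CrossEntropyLoss`, which combines `LogSoftmax` and `NLLLoss` in one step:
###Code
# sketch: only valid if the model outputs raw scores (no LogSoftmax at the end)
alternative_criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____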
###Markdown
Model Training
###Code
epochs = 5
for epoch in range(epochs):
loss_per_epoch = 0
for images, labels in train_loader:
images = images.view(images.shape[0], -1)
# Gradients cleared per batch
optimizer.zero_grad()
# Pass input to the model
output = model(images)
# Calculate loss after training compared to labels
loss = criterion(output, labels)
# backpropagation
loss.backward()
# optimizer step to update the weights
optimizer.step()
loss_per_epoch += loss.item()
average_loss = loss_per_epoch / len(train_loader)
print("Epoch {} - Training loss: {}".format(epoch, average_loss))
###Output
Epoch 0 - Training loss: 0.749698840479056
Epoch 1 - Training loss: 0.22315420204450687
Epoch 2 - Training loss: 0.15833292812642952
Epoch 3 - Training loss: 0.12278508187967042
Epoch 4 - Training loss: 0.10546507445381334
###Markdown
Model Evaluation Similar to the training data loader, we use the validation loader to load the data batch by batch, run the feed-forward network to get the prediction for each data point, and compare it to the associated label.
###Code
correct_predictions, all_count = 0, 0
# enumerate data from the data validation loader (loads a batch at a time)
for batch_id, (images,labels) in enumerate(validation_loader):
for i in range(len(labels)):
img = images[i].view(1, 784)
# at prediction stage, only feed-forward calculation is required.
with torch.no_grad():
logps = model(img)
# Output layer of the network uses a LogSoftMax layer
# Hence the probability must be calculated with the exponential values.
# The final layer returns an array of probabilities for each label
# Pick the maximum probability and the corresponding index
# The corresponding index is the predicted label
ps = torch.exp(logps)
probab = list(ps.numpy()[0])
pred_label = probab.index(max(probab))
true_label = labels.numpy()[i]
if(true_label == pred_label):
correct_predictions += 1
all_count += 1
print(f"Model Accuracy {(correct_predictions/all_count) * 100} %")
###Output
Model Accuracy 96.2 %
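###Markdown
The per-image loop above can also be written in a batched form. The sketch below is an alternative (not from the original notebook) that uses `torch.argmax` over each batch and should give the same accuracy.
###Code
import torch

correct, total = 0, 0
with torch.no_grad():
    for images, labels in validation_loader:
        logps = model(images.view(images.shape[0], -1))
        # argmax over the log-probabilities gives the predicted class per image
        preds = torch.argmax(logps, dim=1)
        correct += (preds == labels).sum().item()
        total += labels.shape[0]
print(f"Model Accuracy {correct / total * 100} %")
###Output
_____no_output_____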
|
exercises/1_uniqueness.ipynb | ###Markdown
Exercise 1: Uniqueness The problem. In [the first tutorial](../tutorials/1_sphere_scatterer_null_field.ipynb), we looked at two formulations for a scattering problem with a Neumann boundary condition. In this exercise, we will investigate the uniqueness of solutions to the boundary integral formulations for this problem. The formulation. In this exercise we will use the null field approach (as in [the first tutorial](../tutorials/1_sphere_scatterer_null_field.ipynb)). This uses the following representation formula and boundary integral equation. Representation formula$$p_\text{total} = \mathcal{D}p_\text{total} + p_\text{inc}$$where $\mathcal{D}$ is the double layer potential operator. Boundary integral equation$$(\mathsf{D}-\tfrac{1}{2}\mathsf{I})p_\text{total} = -p_\text{inc},$$where $\mathsf{D}$ is the double layer boundary operator, and $\mathsf{I}$ is the identity operator. Finding a resonance. The code below plots the condition number of $\mathsf{D}-\tfrac{1}{2}\mathsf{I}$ for 30 values of $k$ between 2.5 and 3.5. There is a sharp increase in the condition number near 3.2. Adjust the limits used in `np.linspace` to approximate the value of $k$ for this spike to 4 or 5 decimal places. (For example, you might start by reducing the search to between 3.0 and 3.3, so `np.linspace(3.0, 3.3, 30)`.)
###Code
%matplotlib inline
import bempp.api
from bempp.api.operators.boundary import helmholtz, sparse
from bempp.api.operators.potential import helmholtz as helmholtz_potential
from bempp.api.linalg import gmres
import numpy as np
from matplotlib import pyplot as plt
grid = bempp.api.shapes.regular_sphere(3)
space = bempp.api.function_space(grid, "DP", 0)
identity = sparse.identity(space, space, space)
x_data = []
y_data = []
for k in np.linspace(2.5, 3.5, 30):
double_layer = helmholtz.double_layer(space, space, space, k)
x_data.append(k)
y_data.append(np.linalg.cond((double_layer - 0.5 * identity).weak_form().to_dense()))
plt.plot(x_data, y_data)
plt.xlabel("Wavenumber ($k$)")
plt.ylabel("Condition number")
plt.show()
###Output
_____no_output_____
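###Markdown
One way to pin down the resonance is simply to repeat the sweep above on successively tighter intervals around the current spike. The rough sketch below (not part of the original exercise, and assuming the `space` and `identity` objects defined above) automates a few such refinements; you can of course also adjust `np.linspace` by hand as suggested.
###Code
import numpy as np
from bempp.api.operators.boundary import helmholtz

lo, hi = 2.5, 3.5
for refinement in range(4):
    ks = np.linspace(lo, hi, 30)
    conds = [np.linalg.cond((helmholtz.double_layer(space, space, space, k)
                             - 0.5 * identity).weak_form().to_dense())
             for k in ks]
    k_peak = ks[int(np.argmax(conds))]
    # Shrink the interval around the current peak for the next pass
    width = (hi - lo) / 10
    lo, hi = k_peak - width, k_peak + width
    print(f"Refinement {refinement}: peak near k = {k_peak:.5f}")
###Output
_____no_output_____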
###Markdown
The effect on the solution. The code below has been copied from [the first tutorial](../tutorials/1_sphere_scatterer_null_field.ipynb) and the wavenumber has been changed to 3. The solution plot looks like a reasonable solution. Change the value of the wavenumber to the resonance that you found above. Does the solution still look reasonable?
###Code
%matplotlib inline
import bempp.api
from bempp.api.operators.boundary import helmholtz, sparse
from bempp.api.operators.potential import helmholtz as helmholtz_potential
from bempp.api.linalg import gmres
import numpy as np
from matplotlib import pyplot as plt
k = 3.
grid = bempp.api.shapes.regular_sphere(3)
space = bempp.api.function_space(grid, "DP", 0)
identity = sparse.identity(space, space, space)
double_layer = helmholtz.double_layer(space, space, space, k)
@bempp.api.complex_callable
def p_inc_callable(x, n, domain_index, result):
result[0] = np.exp(1j * k * x[0])
p_inc = bempp.api.GridFunction(space, fun=p_inc_callable)
p_total, info = gmres(double_layer - 0.5 * identity, -p_inc, tol=1E-5)
Nx = 200
Ny = 200
xmin, xmax, ymin, ymax = [-3, 3, -3, 3]
plot_grid = np.mgrid[xmin:xmax:Nx * 1j, ymin:ymax:Ny * 1j]
points = np.vstack((plot_grid[0].ravel(),
plot_grid[1].ravel(),
np.zeros(plot_grid[0].size)))
p_inc_evaluated = np.real(np.exp(1j * k * points[0, :]))
p_inc_evaluated = p_inc_evaluated.reshape((Nx, Ny))
double_pot = helmholtz_potential.double_layer(space, points, k)
p_s = np.real(double_pot.evaluate(p_total))
p_s = p_s.reshape((Nx, Ny))
vmax = max(np.abs((p_inc_evaluated + p_s).flat))
fig = plt.figure(figsize=(10, 8))
plt.imshow(np.real((p_inc_evaluated + p_s).T), extent=[-3, 3, -3, 3],
cmap=plt.get_cmap("bwr"), vmin=-vmax, vmax=vmax)
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar()
plt.title("Total wave in the plane z=0")
plt.show()
###Output
_____no_output_____
###Markdown
Obtaining the solution for this wavenumber: the Burton–Miller formulation. The Burton–Miller formulation can be used to obtain solutions to acoustic problems while avoiding spurious resonances. Representation formula$$p_\text{s} = \mathcal{D}p_\text{total},$$where $\mathcal{D}$ is the double layer potential operator. Boundary integral equation$$\left(\mathsf{D}-\tfrac{1}{2}\mathsf{I}+\frac{1}{\mathsf{i}k}\mathsf{H}\right)p_\text{total}=-p_\text{inc} + \frac{1}{\mathsf{i}k}\frac{\partial p_\text{inc}}{\partial \mathbf{n}},$$where $\mathsf{D}$ is the double layer boundary operator; $\mathsf{H}$ is the hypersingular boundary operator; and $\mathsf{I}$ is the identity operator. Solving with Bempp. Your task is to adapt and combine the example code in [the first tutorial](../tutorials/1_sphere_scatterer_null_field.ipynb) to solve the problem at the wavenumber you found above using the Burton–Miller formulation. We can create the hypersingular operator in Bempp by calling `helmholtz.hypersingular`. Complex numbers can be used in Python by writing (for example) `2 + 1j`, `3j`, or `1j * 3`. In order for the hypersingular operator to be defined, we must use a P1 space. The code needed to create the relevant operators is given below. Your task is to use these to implement the Burton–Miller formulation. Does the solution you obtain here look more reasonable than the solution above? You might like to adapt the previous example to use a P1 space to be sure that the resonances are still a problem with this alternative space. Hint: the normal derivative ($\frac{\partial p_\text{inc}}{\partial\mathbf{n}}$) in this case is $\mathrm{i}kn_0\mathrm{e}^{\mathrm{i}kx_0}$, where $\mathbf{n}=(n_0,n_1,n_2)$. If you're not sure how to implement this, have a look at [tutorial 2](../tutorials/2_sphere_scatterer_direct.ipynb).
###Code
%matplotlib inline
import bempp.api
from bempp.api.operators.boundary import helmholtz, sparse
from bempp.api.operators.potential import helmholtz as helmholtz_potential
from bempp.api.linalg import gmres
import numpy as np
from matplotlib import pyplot as plt
k = 1 # Enter your value here
grid = bempp.api.shapes.regular_sphere(3)
space = bempp.api.function_space(grid, "P", 1)
identity = sparse.identity(space, space, space)
double_layer = helmholtz.double_layer(space, space, space, k)
hypersingular = helmholtz.hypersingular(space, space, space, k)
###Output
_____no_output_____ |
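###Markdown
If you get stuck, one possible sketch of the Burton–Miller assembly is given below. It is only an outline, assuming the `k`, `space`, `identity`, `double_layer` and `hypersingular` objects from the previous cell, and it follows the same `GridFunction`/`gmres` pattern as the null-field example above; the visualisation code from that example can be reused unchanged.
###Code
import numpy as np
import bempp.api
from bempp.api.linalg import gmres

@bempp.api.complex_callable
def p_inc_callable(x, n, domain_index, result):
    result[0] = np.exp(1j * k * x[0])

@bempp.api.complex_callable
def dp_inc_dn_callable(x, n, domain_index, result):
    # Normal derivative of the incident wave: i * k * n_0 * exp(i * k * x_0)
    result[0] = 1j * k * n[0] * np.exp(1j * k * x[0])

p_inc = bempp.api.GridFunction(space, fun=p_inc_callable)
dp_inc_dn = bempp.api.GridFunction(space, fun=dp_inc_dn_callable)

# Burton-Miller operator and right-hand side
lhs = double_layer - 0.5 * identity + (1 / (1j * k)) * hypersingular
rhs = -p_inc + (1 / (1j * k)) * dp_inc_dn
p_total, info = gmres(lhs, rhs, tol=1E-5)
###Output
_____no_output_____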
notebooks/Parametric_UMAP/03.0-parametric-umap-mnist-embedding-convnet-with-reconstruction.ipynb | ###Markdown
Reconstruction with a custom network. This notebook extends the last notebook to simultaneously train a decoder network, which translates from the embedding back into data space. It also shows you how to use validation data for the reconstruction network during training. load data
###Code
from tensorflow.keras.datasets import mnist
(train_images, Y_train), (test_images, Y_test) = mnist.load_data()
train_images = train_images.reshape((train_images.shape[0], -1))/255.
test_images = test_images.reshape((test_images.shape[0], -1))/255.
###Output
_____no_output_____
###Markdown
define the encoder network
###Code
import tensorflow as tf
dims = (28,28, 1)
n_components = 2
encoder = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=dims),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation="relu", padding="same"
),
tf.keras.layers.Conv2D(
filters=128, kernel_size=3, strides=(2, 2), activation="relu", padding="same"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=n_components),
])
encoder.summary()
decoder = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(n_components)),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=7 * 7 * 128, activation="relu"),
tf.keras.layers.Reshape(target_shape=(7, 7, 128)),
tf.keras.layers.UpSampling2D((2)),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, padding="same", activation="relu"
),
tf.keras.layers.UpSampling2D((2)),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, padding="same", activation="relu"
),
])
decoder.summary()
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_3 (Dense) (None, 512) 1536
_________________________________________________________________
dense_4 (Dense) (None, 512) 262656
_________________________________________________________________
dense_5 (Dense) (None, 6272) 3217536
_________________________________________________________________
reshape (Reshape) (None, 7, 7, 128) 0
_________________________________________________________________
up_sampling2d (UpSampling2D) (None, 14, 14, 128) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 14, 14, 64) 73792
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 28, 28, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 28, 28, 32) 18464
=================================================================
Total params: 3,573,984
Trainable params: 3,573,984
Non-trainable params: 0
_________________________________________________________________
###Markdown
create parametric umap model
###Code
from umap.parametric_umap import ParametricUMAP
embedder = ParametricUMAP(
encoder=encoder,
decoder=decoder,
dims=dims,
n_components=n_components,
n_training_epochs=5,
parametric_reconstruction= True,
reconstruction_validation=test_images,
verbose=True,
)
embedding = embedder.fit_transform(train_images)
###Output
ParametricUMAP(n_training_epochs=5,
optimizer=<tensorflow.python.keras.optimizer_v2.adam.Adam object at 0x7fdb3052e080>,
parametric_reconstruction=True,
reconstruction_validation=array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]))
Construct fuzzy simplicial set
Sun Aug 16 18:21:11 2020 Finding Nearest Neighbors
Sun Aug 16 18:21:11 2020 Building RP forest with 17 trees
Sun Aug 16 18:21:13 2020 parallel NN descent for 16 iterations
0 / 16
1 / 16
2 / 16
3 / 16
4 / 16
Sun Aug 16 18:21:24 2020 Finished Nearest Neighbor Search
Sun Aug 16 18:21:27 2020 Construct embedding
Epoch 1/50
725/725 [==============================] - 16s 23ms/step - loss: 0.4457 - reconstruction_loss: 0.2272 - umap_loss: 0.2186 - val_loss: 0.2024 - val_reconstruction_loss: 0.2024 - val_umap_loss: 0.0000e+00
Epoch 2/50
725/725 [==============================] - 16s 22ms/step - loss: 0.3586 - reconstruction_loss: 0.1895 - umap_loss: 0.1692 - val_loss: 0.1895 - val_reconstruction_loss: 0.1895 - val_umap_loss: 0.0000e+00
Epoch 3/50
725/725 [==============================] - 15s 20ms/step - loss: 0.3357 - reconstruction_loss: 0.1814 - umap_loss: 0.1543 - val_loss: 0.1862 - val_reconstruction_loss: 0.1862 - val_umap_loss: 0.0000e+00
Epoch 4/50
725/725 [==============================] - 14s 20ms/step - loss: 0.3245 - reconstruction_loss: 0.1782 - umap_loss: 0.1463 - val_loss: 0.1835 - val_reconstruction_loss: 0.1835 - val_umap_loss: 0.0000e+00
Epoch 5/50
725/725 [==============================] - 14s 19ms/step - loss: 0.3153 - reconstruction_loss: 0.1751 - umap_loss: 0.1402 - val_loss: 0.1811 - val_reconstruction_loss: 0.1811 - val_umap_loss: 0.0000e+00
Epoch 6/50
725/725 [==============================] - 14s 19ms/step - loss: 0.3092 - reconstruction_loss: 0.1733 - umap_loss: 0.1359 - val_loss: 0.1809 - val_reconstruction_loss: 0.1809 - val_umap_loss: 0.0000e+00
Epoch 7/50
725/725 [==============================] - 13s 18ms/step - loss: 0.3055 - reconstruction_loss: 0.1720 - umap_loss: 0.1335 - val_loss: 0.1794 - val_reconstruction_loss: 0.1794 - val_umap_loss: 0.0000e+00
Epoch 8/50
725/725 [==============================] - 13s 17ms/step - loss: 0.3023 - reconstruction_loss: 0.1707 - umap_loss: 0.1316 - val_loss: 0.1773 - val_reconstruction_loss: 0.1773 - val_umap_loss: 0.0000e+00
Epoch 9/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2968 - reconstruction_loss: 0.1685 - umap_loss: 0.1283 - val_loss: 0.1764 - val_reconstruction_loss: 0.1764 - val_umap_loss: 0.0000e+00
Epoch 10/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2953 - reconstruction_loss: 0.1680 - umap_loss: 0.1273 - val_loss: 0.1772 - val_reconstruction_loss: 0.1772 - val_umap_loss: 0.0000e+00
Epoch 11/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2926 - reconstruction_loss: 0.1669 - umap_loss: 0.1256 - val_loss: 0.1756 - val_reconstruction_loss: 0.1756 - val_umap_loss: 0.0000e+00
Epoch 12/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2920 - reconstruction_loss: 0.1667 - umap_loss: 0.1253 - val_loss: 0.1751 - val_reconstruction_loss: 0.1751 - val_umap_loss: 0.0000e+00
Epoch 13/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2888 - reconstruction_loss: 0.1653 - umap_loss: 0.1234 - val_loss: 0.1743 - val_reconstruction_loss: 0.1743 - val_umap_loss: 0.0000e+00
Epoch 14/50
725/725 [==============================] - 13s 17ms/step - loss: 0.2883 - reconstruction_loss: 0.1653 - umap_loss: 0.1230 - val_loss: 0.1746 - val_reconstruction_loss: 0.1746 - val_umap_loss: 0.0000e+00
Epoch 15/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2865 - reconstruction_loss: 0.1642 - umap_loss: 0.1223 - val_loss: 0.1737 - val_reconstruction_loss: 0.1737 - val_umap_loss: 0.0000e+00
Epoch 16/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2853 - reconstruction_loss: 0.1638 - umap_loss: 0.1214 - val_loss: 0.1762 - val_reconstruction_loss: 0.1762 - val_umap_loss: 0.0000e+00
Epoch 17/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2841 - reconstruction_loss: 0.1636 - umap_loss: 0.1205 - val_loss: 0.1729 - val_reconstruction_loss: 0.1729 - val_umap_loss: 0.0000e+00
Epoch 18/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2834 - reconstruction_loss: 0.1630 - umap_loss: 0.1204 - val_loss: 0.1723 - val_reconstruction_loss: 0.1723 - val_umap_loss: 0.0000e+00
Epoch 19/50
725/725 [==============================] - 13s 17ms/step - loss: 0.2821 - reconstruction_loss: 0.1628 - umap_loss: 0.1194 - val_loss: 0.1723 - val_reconstruction_loss: 0.1723 - val_umap_loss: 0.0000e+00
Epoch 20/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2814 - reconstruction_loss: 0.1622 - umap_loss: 0.1192 - val_loss: 0.1724 - val_reconstruction_loss: 0.1724 - val_umap_loss: 0.0000e+00
Epoch 21/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2812 - reconstruction_loss: 0.1621 - umap_loss: 0.1190 - val_loss: 0.1710 - val_reconstruction_loss: 0.1710 - val_umap_loss: 0.0000e+00
Epoch 22/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2806 - reconstruction_loss: 0.1618 - umap_loss: 0.1188 - val_loss: 0.1716 - val_reconstruction_loss: 0.1716 - val_umap_loss: 0.0000e+00
Epoch 23/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2797 - reconstruction_loss: 0.1613 - umap_loss: 0.1184 - val_loss: 0.1713 - val_reconstruction_loss: 0.1713 - val_umap_loss: 0.0000e+00
Epoch 24/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2792 - reconstruction_loss: 0.1612 - umap_loss: 0.1180 - val_loss: 0.1720 - val_reconstruction_loss: 0.1720 - val_umap_loss: 0.0000e+00
Epoch 25/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2796 - reconstruction_loss: 0.1614 - umap_loss: 0.1181 - val_loss: 0.1713 - val_reconstruction_loss: 0.1713 - val_umap_loss: 0.0000e+00
Epoch 26/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2782 - reconstruction_loss: 0.1607 - umap_loss: 0.1175 - val_loss: 0.1712 - val_reconstruction_loss: 0.1712 - val_umap_loss: 0.0000e+00
Epoch 27/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2777 - reconstruction_loss: 0.1604 - umap_loss: 0.1174 - val_loss: 0.1733 - val_reconstruction_loss: 0.1733 - val_umap_loss: 0.0000e+00
Epoch 28/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2779 - reconstruction_loss: 0.1603 - umap_loss: 0.1177 - val_loss: 0.1706 - val_reconstruction_loss: 0.1706 - val_umap_loss: 0.0000e+00
Epoch 29/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2768 - reconstruction_loss: 0.1601 - umap_loss: 0.1167 - val_loss: 0.1712 - val_reconstruction_loss: 0.1712 - val_umap_loss: 0.0000e+00
Epoch 30/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2763 - reconstruction_loss: 0.1599 - umap_loss: 0.1164 - val_loss: 0.1720 - val_reconstruction_loss: 0.1720 - val_umap_loss: 0.0000e+00
Epoch 31/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2761 - reconstruction_loss: 0.1597 - umap_loss: 0.1164 - val_loss: 0.1702 - val_reconstruction_loss: 0.1702 - val_umap_loss: 0.0000e+00
Epoch 32/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2751 - reconstruction_loss: 0.1593 - umap_loss: 0.1157 - val_loss: 0.1711 - val_reconstruction_loss: 0.1711 - val_umap_loss: 0.0000e+00
Epoch 33/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2754 - reconstruction_loss: 0.1593 - umap_loss: 0.1162 - val_loss: 0.1709 - val_reconstruction_loss: 0.1709 - val_umap_loss: 0.0000e+00
Epoch 34/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2755 - reconstruction_loss: 0.1595 - umap_loss: 0.1160 - val_loss: 0.1708 - val_reconstruction_loss: 0.1708 - val_umap_loss: 0.0000e+00
Epoch 35/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2751 - reconstruction_loss: 0.1592 - umap_loss: 0.1158 - val_loss: 0.1699 - val_reconstruction_loss: 0.1699 - val_umap_loss: 0.0000e+00
Epoch 36/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2742 - reconstruction_loss: 0.1591 - umap_loss: 0.1151 - val_loss: 0.1705 - val_reconstruction_loss: 0.1705 - val_umap_loss: 0.0000e+00
Epoch 37/50
725/725 [==============================] - 16s 21ms/step - loss: 0.2745 - reconstruction_loss: 0.1591 - umap_loss: 0.1154 - val_loss: 0.1705 - val_reconstruction_loss: 0.1705 - val_umap_loss: 0.0000e+00
Epoch 38/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2745 - reconstruction_loss: 0.1589 - umap_loss: 0.1156 - val_loss: 0.1700 - val_reconstruction_loss: 0.1700 - val_umap_loss: 0.0000e+00
Epoch 39/50
725/725 [==============================] - 15s 20ms/step - loss: 0.2731 - reconstruction_loss: 0.1585 - umap_loss: 0.1146 - val_loss: 0.1698 - val_reconstruction_loss: 0.1698 - val_umap_loss: 0.0000e+00
Epoch 40/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2746 - reconstruction_loss: 0.1591 - umap_loss: 0.1155 - val_loss: 0.1704 - val_reconstruction_loss: 0.1704 - val_umap_loss: 0.0000e+00
Epoch 41/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2734 - reconstruction_loss: 0.1586 - umap_loss: 0.1148 - val_loss: 0.1707 - val_reconstruction_loss: 0.1707 - val_umap_loss: 0.0000e+00
Epoch 42/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2738 - reconstruction_loss: 0.1588 - umap_loss: 0.1150 - val_loss: 0.1702 - val_reconstruction_loss: 0.1702 - val_umap_loss: 0.0000e+00
Epoch 43/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2737 - reconstruction_loss: 0.1586 - umap_loss: 0.1150 - val_loss: 0.1699 - val_reconstruction_loss: 0.1699 - val_umap_loss: 0.0000e+00
Epoch 44/50
725/725 [==============================] - 16s 23ms/step - loss: 0.2728 - reconstruction_loss: 0.1581 - umap_loss: 0.1147 - val_loss: 0.1700 - val_reconstruction_loss: 0.1700 - val_umap_loss: 0.0000e+00
Epoch 45/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2727 - reconstruction_loss: 0.1582 - umap_loss: 0.1145 - val_loss: 0.1695 - val_reconstruction_loss: 0.1695 - val_umap_loss: 0.0000e+00
Epoch 46/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2726 - reconstruction_loss: 0.1582 - umap_loss: 0.1144 - val_loss: 0.1694 - val_reconstruction_loss: 0.1694 - val_umap_loss: 0.0000e+00
Epoch 47/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2723 - reconstruction_loss: 0.1580 - umap_loss: 0.1143 - val_loss: 0.1695 - val_reconstruction_loss: 0.1695 - val_umap_loss: 0.0000e+00
Epoch 48/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2722 - reconstruction_loss: 0.1580 - umap_loss: 0.1143 - val_loss: 0.1686 - val_reconstruction_loss: 0.1686 - val_umap_loss: 0.0000e+00
Epoch 49/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2727 - reconstruction_loss: 0.1581 - umap_loss: 0.1145 - val_loss: 0.1698 - val_reconstruction_loss: 0.1698 - val_umap_loss: 0.0000e+00
Epoch 50/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2718 - reconstruction_loss: 0.1578 - umap_loss: 0.1140 - val_loss: 0.1693 - val_reconstruction_loss: 0.1693 - val_umap_loss: 0.0000e+00
1875/1875 [==============================] - 1s 760us/step
Sun Aug 16 18:33:35 2020 Finished embedding
###Markdown
plot reconstructions
###Code
test_images_recon = embedder.inverse_transform(embedder.transform(test_images))
import numpy as np
import matplotlib.pyplot as plt
nex = 10
fig, axs = plt.subplots(ncols=10, nrows=2, figsize=(nex, 2))
for i in range(nex):
axs[0, i].matshow(np.squeeze(test_images[i].reshape(28, 28, 1)), cmap=plt.cm.Greys)
axs[1, i].matshow(
tf.nn.sigmoid(np.squeeze(test_images_recon[i].reshape(28, 28, 1))),
cmap=plt.cm.Greys,
)
for ax in axs.flatten():
ax.axis("off")
###Output
_____no_output_____
###Markdown
plot results
###Code
embedding = embedder.embedding_
import matplotlib.pyplot as plt
fig, ax = plt.subplots( figsize=(8, 8))
sc = ax.scatter(
embedding[:, 0],
embedding[:, 1],
c=Y_train.astype(int),
cmap="tab10",
s=0.1,
alpha=0.5,
rasterized=True,
)
ax.axis('equal')
ax.set_title("UMAP in Tensorflow embedding", fontsize=20)
plt.colorbar(sc, ax=ax);
###Output
_____no_output_____
###Markdown
plotting loss
###Code
embedder._history.keys()
fig, axs = plt.subplots(ncols=2, figsize=(10,5))
ax = axs[0]
ax.plot(embedder._history['loss'])
ax.set_ylabel('Cross Entropy')
ax.set_xlabel('Epoch')
ax = axs[1]
ax.plot(embedder._history['reconstruction_loss'], label='train')
ax.plot(embedder._history['val_reconstruction_loss'], label='valid')
ax.legend()
ax.set_ylabel('Cross Entropy')
ax.set_xlabel('Epoch')
###Output
_____no_output_____
###Markdown
Reconstruction with a custom network. This notebook extends the last notebook to simultaneously train a decoder network, which translates from the embedding back into data space. It also shows you how to use validation data for the reconstruction network during training. load data
###Code
from tensorflow.keras.datasets import mnist
(train_images, Y_train), (test_images, Y_test) = mnist.load_data()
train_images = train_images.reshape((train_images.shape[0], -1))/255.
test_images = test_images.reshape((test_images.shape[0], -1))/255.
###Output
_____no_output_____
###Markdown
define the encoder network
###Code
import tensorflow as tf

dims = (28,28, 1)
n_components = 2
encoder = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=dims),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation="relu", padding="same"
),
tf.keras.layers.Conv2D(
filters=128, kernel_size=3, strides=(2, 2), activation="relu", padding="same"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=n_components),
])
encoder.summary()
decoder = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(n_components)),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=512, activation="relu"),
tf.keras.layers.Dense(units=7 * 7 * 128, activation="relu"),
tf.keras.layers.Reshape(target_shape=(7, 7, 128)),
tf.keras.layers.UpSampling2D((2)),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, padding="same", activation="relu"
),
tf.keras.layers.UpSampling2D((2)),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, padding="same", activation="relu"
),
])
decoder.summary()
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_3 (Dense) (None, 512) 1536
_________________________________________________________________
dense_4 (Dense) (None, 512) 262656
_________________________________________________________________
dense_5 (Dense) (None, 6272) 3217536
_________________________________________________________________
reshape (Reshape) (None, 7, 7, 128) 0
_________________________________________________________________
up_sampling2d (UpSampling2D) (None, 14, 14, 128) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 14, 14, 64) 73792
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 28, 28, 64) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 28, 28, 32) 18464
=================================================================
Total params: 3,573,984
Trainable params: 3,573,984
Non-trainable params: 0
_________________________________________________________________
###Markdown
create parametric umap model
###Code
from umap.parametric_umap import ParametricUMAP
embedder = ParametricUMAP(
encoder=encoder,
decoder=decoder,
dims=dims,
n_training_epochs=5,
parametric_reconstruction= True,
reconstruction_validation=test_images,
verbose=True,
)
embedding = embedder.fit_transform(train_images)
###Output
ParametricUMAP(n_training_epochs=5,
optimizer=<tensorflow.python.keras.optimizer_v2.adam.Adam object at 0x7fdb3052e080>,
parametric_reconstruction=True,
reconstruction_validation=array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]))
Construct fuzzy simplicial set
Sun Aug 16 18:21:11 2020 Finding Nearest Neighbors
Sun Aug 16 18:21:11 2020 Building RP forest with 17 trees
Sun Aug 16 18:21:13 2020 parallel NN descent for 16 iterations
0 / 16
1 / 16
2 / 16
3 / 16
4 / 16
Sun Aug 16 18:21:24 2020 Finished Nearest Neighbor Search
Sun Aug 16 18:21:27 2020 Construct embedding
Epoch 1/50
725/725 [==============================] - 16s 23ms/step - loss: 0.4457 - reconstruction_loss: 0.2272 - umap_loss: 0.2186 - val_loss: 0.2024 - val_reconstruction_loss: 0.2024 - val_umap_loss: 0.0000e+00
Epoch 2/50
725/725 [==============================] - 16s 22ms/step - loss: 0.3586 - reconstruction_loss: 0.1895 - umap_loss: 0.1692 - val_loss: 0.1895 - val_reconstruction_loss: 0.1895 - val_umap_loss: 0.0000e+00
Epoch 3/50
725/725 [==============================] - 15s 20ms/step - loss: 0.3357 - reconstruction_loss: 0.1814 - umap_loss: 0.1543 - val_loss: 0.1862 - val_reconstruction_loss: 0.1862 - val_umap_loss: 0.0000e+00
Epoch 4/50
725/725 [==============================] - 14s 20ms/step - loss: 0.3245 - reconstruction_loss: 0.1782 - umap_loss: 0.1463 - val_loss: 0.1835 - val_reconstruction_loss: 0.1835 - val_umap_loss: 0.0000e+00
Epoch 5/50
725/725 [==============================] - 14s 19ms/step - loss: 0.3153 - reconstruction_loss: 0.1751 - umap_loss: 0.1402 - val_loss: 0.1811 - val_reconstruction_loss: 0.1811 - val_umap_loss: 0.0000e+00
Epoch 6/50
725/725 [==============================] - 14s 19ms/step - loss: 0.3092 - reconstruction_loss: 0.1733 - umap_loss: 0.1359 - val_loss: 0.1809 - val_reconstruction_loss: 0.1809 - val_umap_loss: 0.0000e+00
Epoch 7/50
725/725 [==============================] - 13s 18ms/step - loss: 0.3055 - reconstruction_loss: 0.1720 - umap_loss: 0.1335 - val_loss: 0.1794 - val_reconstruction_loss: 0.1794 - val_umap_loss: 0.0000e+00
Epoch 8/50
725/725 [==============================] - 13s 17ms/step - loss: 0.3023 - reconstruction_loss: 0.1707 - umap_loss: 0.1316 - val_loss: 0.1773 - val_reconstruction_loss: 0.1773 - val_umap_loss: 0.0000e+00
Epoch 9/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2968 - reconstruction_loss: 0.1685 - umap_loss: 0.1283 - val_loss: 0.1764 - val_reconstruction_loss: 0.1764 - val_umap_loss: 0.0000e+00
Epoch 10/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2953 - reconstruction_loss: 0.1680 - umap_loss: 0.1273 - val_loss: 0.1772 - val_reconstruction_loss: 0.1772 - val_umap_loss: 0.0000e+00
Epoch 11/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2926 - reconstruction_loss: 0.1669 - umap_loss: 0.1256 - val_loss: 0.1756 - val_reconstruction_loss: 0.1756 - val_umap_loss: 0.0000e+00
Epoch 12/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2920 - reconstruction_loss: 0.1667 - umap_loss: 0.1253 - val_loss: 0.1751 - val_reconstruction_loss: 0.1751 - val_umap_loss: 0.0000e+00
Epoch 13/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2888 - reconstruction_loss: 0.1653 - umap_loss: 0.1234 - val_loss: 0.1743 - val_reconstruction_loss: 0.1743 - val_umap_loss: 0.0000e+00
Epoch 14/50
725/725 [==============================] - 13s 17ms/step - loss: 0.2883 - reconstruction_loss: 0.1653 - umap_loss: 0.1230 - val_loss: 0.1746 - val_reconstruction_loss: 0.1746 - val_umap_loss: 0.0000e+00
Epoch 15/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2865 - reconstruction_loss: 0.1642 - umap_loss: 0.1223 - val_loss: 0.1737 - val_reconstruction_loss: 0.1737 - val_umap_loss: 0.0000e+00
Epoch 16/50
725/725 [==============================] - 12s 17ms/step - loss: 0.2853 - reconstruction_loss: 0.1638 - umap_loss: 0.1214 - val_loss: 0.1762 - val_reconstruction_loss: 0.1762 - val_umap_loss: 0.0000e+00
Epoch 17/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2841 - reconstruction_loss: 0.1636 - umap_loss: 0.1205 - val_loss: 0.1729 - val_reconstruction_loss: 0.1729 - val_umap_loss: 0.0000e+00
Epoch 18/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2834 - reconstruction_loss: 0.1630 - umap_loss: 0.1204 - val_loss: 0.1723 - val_reconstruction_loss: 0.1723 - val_umap_loss: 0.0000e+00
Epoch 19/50
725/725 [==============================] - 13s 17ms/step - loss: 0.2821 - reconstruction_loss: 0.1628 - umap_loss: 0.1194 - val_loss: 0.1723 - val_reconstruction_loss: 0.1723 - val_umap_loss: 0.0000e+00
Epoch 20/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2814 - reconstruction_loss: 0.1622 - umap_loss: 0.1192 - val_loss: 0.1724 - val_reconstruction_loss: 0.1724 - val_umap_loss: 0.0000e+00
Epoch 21/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2812 - reconstruction_loss: 0.1621 - umap_loss: 0.1190 - val_loss: 0.1710 - val_reconstruction_loss: 0.1710 - val_umap_loss: 0.0000e+00
Epoch 22/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2806 - reconstruction_loss: 0.1618 - umap_loss: 0.1188 - val_loss: 0.1716 - val_reconstruction_loss: 0.1716 - val_umap_loss: 0.0000e+00
Epoch 23/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2797 - reconstruction_loss: 0.1613 - umap_loss: 0.1184 - val_loss: 0.1713 - val_reconstruction_loss: 0.1713 - val_umap_loss: 0.0000e+00
Epoch 24/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2792 - reconstruction_loss: 0.1612 - umap_loss: 0.1180 - val_loss: 0.1720 - val_reconstruction_loss: 0.1720 - val_umap_loss: 0.0000e+00
Epoch 25/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2796 - reconstruction_loss: 0.1614 - umap_loss: 0.1181 - val_loss: 0.1713 - val_reconstruction_loss: 0.1713 - val_umap_loss: 0.0000e+00
Epoch 26/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2782 - reconstruction_loss: 0.1607 - umap_loss: 0.1175 - val_loss: 0.1712 - val_reconstruction_loss: 0.1712 - val_umap_loss: 0.0000e+00
Epoch 27/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2777 - reconstruction_loss: 0.1604 - umap_loss: 0.1174 - val_loss: 0.1733 - val_reconstruction_loss: 0.1733 - val_umap_loss: 0.0000e+00
Epoch 28/50
725/725 [==============================] - 13s 18ms/step - loss: 0.2779 - reconstruction_loss: 0.1603 - umap_loss: 0.1177 - val_loss: 0.1706 - val_reconstruction_loss: 0.1706 - val_umap_loss: 0.0000e+00
Epoch 29/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2768 - reconstruction_loss: 0.1601 - umap_loss: 0.1167 - val_loss: 0.1712 - val_reconstruction_loss: 0.1712 - val_umap_loss: 0.0000e+00
Epoch 30/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2763 - reconstruction_loss: 0.1599 - umap_loss: 0.1164 - val_loss: 0.1720 - val_reconstruction_loss: 0.1720 - val_umap_loss: 0.0000e+00
Epoch 31/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2761 - reconstruction_loss: 0.1597 - umap_loss: 0.1164 - val_loss: 0.1702 - val_reconstruction_loss: 0.1702 - val_umap_loss: 0.0000e+00
Epoch 32/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2751 - reconstruction_loss: 0.1593 - umap_loss: 0.1157 - val_loss: 0.1711 - val_reconstruction_loss: 0.1711 - val_umap_loss: 0.0000e+00
Epoch 33/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2754 - reconstruction_loss: 0.1593 - umap_loss: 0.1162 - val_loss: 0.1709 - val_reconstruction_loss: 0.1709 - val_umap_loss: 0.0000e+00
Epoch 34/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2755 - reconstruction_loss: 0.1595 - umap_loss: 0.1160 - val_loss: 0.1708 - val_reconstruction_loss: 0.1708 - val_umap_loss: 0.0000e+00
Epoch 35/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2751 - reconstruction_loss: 0.1592 - umap_loss: 0.1158 - val_loss: 0.1699 - val_reconstruction_loss: 0.1699 - val_umap_loss: 0.0000e+00
Epoch 36/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2742 - reconstruction_loss: 0.1591 - umap_loss: 0.1151 - val_loss: 0.1705 - val_reconstruction_loss: 0.1705 - val_umap_loss: 0.0000e+00
Epoch 37/50
725/725 [==============================] - 16s 21ms/step - loss: 0.2745 - reconstruction_loss: 0.1591 - umap_loss: 0.1154 - val_loss: 0.1705 - val_reconstruction_loss: 0.1705 - val_umap_loss: 0.0000e+00
Epoch 38/50
725/725 [==============================] - 14s 20ms/step - loss: 0.2745 - reconstruction_loss: 0.1589 - umap_loss: 0.1156 - val_loss: 0.1700 - val_reconstruction_loss: 0.1700 - val_umap_loss: 0.0000e+00
Epoch 39/50
725/725 [==============================] - 15s 20ms/step - loss: 0.2731 - reconstruction_loss: 0.1585 - umap_loss: 0.1146 - val_loss: 0.1698 - val_reconstruction_loss: 0.1698 - val_umap_loss: 0.0000e+00
Epoch 40/50
725/725 [==============================] - 14s 19ms/step - loss: 0.2746 - reconstruction_loss: 0.1591 - umap_loss: 0.1155 - val_loss: 0.1704 - val_reconstruction_loss: 0.1704 - val_umap_loss: 0.0000e+00
Epoch 41/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2734 - reconstruction_loss: 0.1586 - umap_loss: 0.1148 - val_loss: 0.1707 - val_reconstruction_loss: 0.1707 - val_umap_loss: 0.0000e+00
Epoch 42/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2738 - reconstruction_loss: 0.1588 - umap_loss: 0.1150 - val_loss: 0.1702 - val_reconstruction_loss: 0.1702 - val_umap_loss: 0.0000e+00
Epoch 43/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2737 - reconstruction_loss: 0.1586 - umap_loss: 0.1150 - val_loss: 0.1699 - val_reconstruction_loss: 0.1699 - val_umap_loss: 0.0000e+00
Epoch 44/50
725/725 [==============================] - 16s 23ms/step - loss: 0.2728 - reconstruction_loss: 0.1581 - umap_loss: 0.1147 - val_loss: 0.1700 - val_reconstruction_loss: 0.1700 - val_umap_loss: 0.0000e+00
Epoch 45/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2727 - reconstruction_loss: 0.1582 - umap_loss: 0.1145 - val_loss: 0.1695 - val_reconstruction_loss: 0.1695 - val_umap_loss: 0.0000e+00
Epoch 46/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2726 - reconstruction_loss: 0.1582 - umap_loss: 0.1144 - val_loss: 0.1694 - val_reconstruction_loss: 0.1694 - val_umap_loss: 0.0000e+00
Epoch 47/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2723 - reconstruction_loss: 0.1580 - umap_loss: 0.1143 - val_loss: 0.1695 - val_reconstruction_loss: 0.1695 - val_umap_loss: 0.0000e+00
Epoch 48/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2722 - reconstruction_loss: 0.1580 - umap_loss: 0.1143 - val_loss: 0.1686 - val_reconstruction_loss: 0.1686 - val_umap_loss: 0.0000e+00
Epoch 49/50
725/725 [==============================] - 16s 22ms/step - loss: 0.2727 - reconstruction_loss: 0.1581 - umap_loss: 0.1145 - val_loss: 0.1698 - val_reconstruction_loss: 0.1698 - val_umap_loss: 0.0000e+00
Epoch 50/50
725/725 [==============================] - 15s 21ms/step - loss: 0.2718 - reconstruction_loss: 0.1578 - umap_loss: 0.1140 - val_loss: 0.1693 - val_reconstruction_loss: 0.1693 - val_umap_loss: 0.0000e+00
1875/1875 [==============================] - 1s 760us/step
Sun Aug 16 18:33:35 2020 Finished embedding
###Markdown
plot reconstructions
###Code
test_images_recon = embedder.inverse_transform(embedder.transform(test_images))
import numpy as np
import matplotlib.pyplot as plt
nex = 10
fig, axs = plt.subplots(ncols=10, nrows=2, figsize=(nex, 2))
for i in range(nex):
axs[0, i].matshow(np.squeeze(test_images[i].reshape(28, 28, 1)), cmap=plt.cm.Greys)
axs[1, i].matshow(
tf.nn.sigmoid(np.squeeze(test_images_recon[i].reshape(28, 28, 1))),
cmap=plt.cm.Greys,
)
for ax in axs.flatten():
ax.axis("off")
###Output
_____no_output_____
###Markdown
plot results
###Code
embedding = embedder.embedding_
import matplotlib.pyplot as plt
fig, ax = plt.subplots( figsize=(8, 8))
sc = ax.scatter(
embedding[:, 0],
embedding[:, 1],
c=Y_train.astype(int),
cmap="tab10",
s=0.1,
alpha=0.5,
rasterized=True,
)
ax.axis('equal')
ax.set_title("UMAP in Tensorflow embedding", fontsize=20)
plt.colorbar(sc, ax=ax);
###Output
_____no_output_____
###Markdown
plotting loss
###Code
embedder._history.keys()
fig, axs = plt.subplots(ncols=2, figsize=(10,5))
ax = axs[0]
ax.plot(embedder._history['loss'])
ax.set_ylabel('Cross Entropy')
ax.set_xlabel('Epoch')
ax = axs[1]
ax.plot(embedder._history['reconstruction_loss'], label='train')
ax.plot(embedder._history['val_reconstruction_loss'], label='valid')
ax.legend()
ax.set_ylabel('Cross Entropy')
ax.set_xlabel('Epoch')
###Output
_____no_output_____
###Markdown
Reconstruction with a custom network. This notebook extends the last notebook to simultaneously train a decoder network, which translates from the embedding back into data space. It also shows you how to use validation data for the reconstruction network during training. load data
###Code
import tensorflow as tf
tf.__version__
from tensorflow.keras.datasets import mnist
(train_images, Y_train), (test_images, Y_test) = mnist.load_data()
train_images = train_images.reshape((train_images.shape[0], -1))/255.
test_images = test_images.reshape((test_images.shape[0], -1))/255.
###Output
_____no_output_____
###Markdown
define the encoder network
###Code
import tensorflow as tf
tf.__version__
tf.config.list_physical_devices('GPU')
dims = (28,28, 1)
n_components = 2
encoder = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=dims),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation="relu", padding="same"
),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation="relu", padding="same"
),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=128, activation="relu"),
tf.keras.layers.Dense(units=128, activation="relu"),
tf.keras.layers.Dense(units=n_components),
])
encoder.summary()
decoder = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(n_components)),
tf.keras.layers.Dense(units=128, activation="relu"),
tf.keras.layers.Dense(units=7 * 7 * 128, activation="relu"),
tf.keras.layers.Reshape(target_shape=(7, 7, 128)),
tf.keras.layers.Conv2DTranspose(
filters=64, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=32, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"
),
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME", activation="sigmoid"
)
])
decoder.summary()
###Output
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense_3 (Dense) (None, 128) 384
_________________________________________________________________
dense_4 (Dense) (None, 6272) 809088
_________________________________________________________________
reshape (Reshape) (None, 7, 7, 128) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 14, 14, 64) 73792
_________________________________________________________________
conv2d_transpose_1 (Conv2DTr (None, 28, 28, 32) 18464
_________________________________________________________________
conv2d_transpose_2 (Conv2DTr (None, 28, 28, 1) 289
=================================================================
Total params: 902,017
Trainable params: 902,017
Non-trainable params: 0
_________________________________________________________________
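###Markdown
Before handing the networks to ParametricUMAP, a quick sanity check (a sketch, not in the original notebook) that the encoder and decoder shapes line up: encode a dummy batch and decode it back.
###Code
import numpy as np

dummy = np.zeros((4, 28, 28, 1), dtype="float32")  # a fake batch of four images
z = encoder(dummy)
x_recon = decoder(z)
print(z.shape)        # (4, 2): low-dimensional embedding
print(x_recon.shape)  # (4, 28, 28, 1): reconstruction back in data space
###Output
_____no_output_____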
###Markdown
create parametric umap model
###Code
from umap.parametric_umap import ParametricUMAP
embedder = ParametricUMAP(
encoder=encoder,
decoder=decoder,
dims=dims,
n_components=n_components,
    n_training_epochs=1, # dictates how many total training epochs to run
    n_epochs = 50, # dictates how many times edges are trained per 'epoch', to keep consistent with non-parametric UMAP
parametric_reconstruction= True,
reconstruction_validation=test_images,
parametric_reconstruction_loss_fcn = tf.keras.losses.MSE,
verbose=True,
)
train_images.shape
embedding = embedder.fit_transform(train_images)
###Output
ParametricUMAP(decoder=<tensorflow.python.keras.engine.sequential.Sequential object at 0x7f99f6495ca0>,
dims=(28, 28, 1),
encoder=<tensorflow.python.keras.engine.sequential.Sequential object at 0x7f99f6f21a60>,
optimizer=<tensorflow.python.keras.optimizer_v2.adam.Adam object at 0x7f993407f400>,
parametric_reconstruction=True,
parametric_reconstruction_loss_fcn=<function mean_squared_error at 0x7f9a043c0ca0>,
reconstruction_validation=array([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]))
Construct fuzzy simplicial set
Tue Apr 20 13:48:56 2021 Finding Nearest Neighbors
Tue Apr 20 13:48:56 2021 Building RP forest with 17 trees
Tue Apr 20 13:49:03 2021 parallel NN descent for 16 iterations
0 / 16
1 / 16
2 / 16
3 / 16
4 / 16
Tue Apr 20 13:49:19 2021 Finished Nearest Neighbor Search
Tue Apr 20 13:49:23 2021 Construct embedding
Epoch 1/10
1905/1905 [==============================] - 167s 52ms/step - loss: 0.2818 - reconstruction_loss: 0.0570 - umap_loss: 0.2248 - val_loss: 0.0411 - val_reconstruction_loss: 0.0411 - val_umap_loss: 0.0000e+00
Epoch 2/10
1905/1905 [==============================] - 53s 28ms/step - loss: 0.1698 - reconstruction_loss: 0.0394 - umap_loss: 0.1304 - val_loss: 0.0381 - val_reconstruction_loss: 0.0381 - val_umap_loss: 0.0000e+00
Epoch 3/10
1905/1905 [==============================] - 54s 28ms/step - loss: 0.1550 - reconstruction_loss: 0.0373 - umap_loss: 0.1178 - val_loss: 0.0382 - val_reconstruction_loss: 0.0382 - val_umap_loss: 0.0000e+00
Epoch 4/10
1905/1905 [==============================] - 53s 28ms/step - loss: 0.1465 - reconstruction_loss: 0.0354 - umap_loss: 0.1112 - val_loss: 0.0369 - val_reconstruction_loss: 0.0369 - val_umap_loss: 0.0000e+00
Epoch 5/10
1905/1905 [==============================] - 52s 27ms/step - loss: 0.1417 - reconstruction_loss: 0.0345 - umap_loss: 0.1072 - val_loss: 0.0361 - val_reconstruction_loss: 0.0361 - val_umap_loss: 0.0000e+00
Epoch 6/10
1905/1905 [==============================] - 53s 28ms/step - loss: 0.1386 - reconstruction_loss: 0.0343 - umap_loss: 0.1043 - val_loss: 0.0383 - val_reconstruction_loss: 0.0383 - val_umap_loss: 0.0000e+00
Epoch 7/10
1905/1905 [==============================] - 52s 27ms/step - loss: 0.1361 - reconstruction_loss: 0.0339 - umap_loss: 0.1022 - val_loss: 0.0358 - val_reconstruction_loss: 0.0358 - val_umap_loss: 0.0000e+00
Epoch 8/10
1905/1905 [==============================] - 54s 28ms/step - loss: 0.1345 - reconstruction_loss: 0.0337 - umap_loss: 0.1008 - val_loss: 0.0371 - val_reconstruction_loss: 0.0371 - val_umap_loss: 0.0000e+00
Epoch 9/10
1905/1905 [==============================] - 55s 29ms/step - loss: 0.1332 - reconstruction_loss: 0.0336 - umap_loss: 0.0995 - val_loss: 0.0369 - val_reconstruction_loss: 0.0369 - val_umap_loss: 0.0000e+00
Epoch 10/10
1905/1905 [==============================] - 53s 28ms/step - loss: 0.1326 - reconstruction_loss: 0.0335 - umap_loss: 0.0991 - val_loss: 0.0354 - val_reconstruction_loss: 0.0354 - val_umap_loss: 0.0000e+00
1875/1875 [==============================] - 2s 1ms/step
Tue Apr 20 14:00:29 2021 Finished embedding
###Markdown
plot reconstructions
###Code
test_images_recon = embedder.inverse_transform(embedder.transform(test_images.reshape(len(test_images), 28,28,1)))
import numpy as np
import matplotlib.pyplot as plt
np.min(test_images), np.max(test_images)
nex = 10
fig, axs = plt.subplots(ncols=10, nrows=2, figsize=(nex, 2))
for i in range(nex):
axs[0, i].matshow(np.squeeze(test_images[i].reshape(28, 28, 1)), cmap=plt.cm.Greys)
axs[1, i].matshow(
np.squeeze(test_images_recon[i].reshape(28, 28, 1)),
cmap=plt.cm.Greys, vmin = 0, vmax = 1
)
for ax in axs.flatten():
ax.axis("off")
###Output
_____no_output_____
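###Markdown
As a quick numerical check on the MSE-trained decoder, the short sketch below (not in the original notebook) computes the mean per-pixel squared reconstruction error over the test set, using the `test_images_recon` array from the cell above.
###Code
import numpy as np

flat_orig = test_images.reshape(len(test_images), -1)
flat_recon = test_images_recon.reshape(len(test_images_recon), -1)
# Mean squared error per pixel, averaged over all test images
recon_mse = np.mean((flat_orig - flat_recon) ** 2)
print(f"Mean per-pixel reconstruction MSE on the test set: {recon_mse:.4f}")
###Output
_____no_output_____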
###Markdown
plot results
###Code
embedding = embedder.embedding_
import matplotlib.pyplot as plt
fig, ax = plt.subplots( figsize=(8, 8))
sc = ax.scatter(
embedding[:, 0],
embedding[:, 1],
c=Y_train.astype(int),
cmap="tab10",
s=0.1,
alpha=0.5,
rasterized=True,
)
ax.axis('equal')
ax.set_title("UMAP in Tensorflow embedding", fontsize=20)
plt.colorbar(sc, ax=ax);
###Output
_____no_output_____
###Markdown
plotting loss
###Code
embedder._history.keys()
fig, axs = plt.subplots(ncols=2, figsize=(10,5))
ax = axs[0]
ax.plot(embedder._history['loss'])
ax.set_ylabel('Cross Entropy')
ax.set_xlabel('Epoch')
ax = axs[1]
ax.plot(embedder._history['reconstruction_loss'], label='train')
ax.plot(embedder._history['val_reconstruction_loss'], label='valid')
ax.legend()
ax.set_ylabel('Cross Entropy')
ax.set_xlabel('Epoch')
###Output
_____no_output_____ |
chapter11_part03_transformer.ipynb | ###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6.[](https://colab.research.google.com/github/achimr/deep-learning-with-python-notebooks/blob/master/chapter11_part03_transformer.ipynb) The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-head attention The Transformer encoder **Getting the data**
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
###Output
_____no_output_____
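###Markdown
To see what the vectorizer produces, the short sketch below (not part of the book's code) passes a toy review through the adapted `text_vectorization` layer; the result is a batch of integer token ids padded or truncated to `max_length`.
###Code
sample = ["this movie was a wonderful surprise"]
token_ids = text_vectorization(sample)
print(token_ids.shape)   # (1, 600): one review, max_length token ids
print(token_ids[0, :8])  # the first few ids; 0 is the padding token
###Output
_____no_output_____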
###Markdown
**Transformer encoder implemented as a subclassed `Layer`**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
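###Markdown
For intuition about what the `MultiHeadAttention` layer computes inside the encoder, here is a minimal single-head scaled dot-product attention written by hand (an illustration, not the book's code): queries are compared with keys, the scores are softmax-normalized, and the values are averaged with those weights.
###Code
import tensorflow as tf

def scaled_dot_product_attention(query, key, value):
    # query, key, value: (batch, seq_len, depth)
    depth = tf.cast(tf.shape(key)[-1], tf.float32)
    scores = tf.matmul(query, key, transpose_b=True) / tf.sqrt(depth)
    weights = tf.nn.softmax(scores, axis=-1)  # attention weights per query position
    return tf.matmul(weights, value)          # weighted sum of the values

x = tf.random.normal((2, 5, 8))               # toy batch: 2 sequences, 5 tokens, depth 8
out = scaled_dot_product_attention(x, x, x)   # self-attention: same tensor as Q, K and V
print(out.shape)                              # (2, 5, 8)
###Output
_____no_output_____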
###Markdown
**Using the Transformer encoder for text classification**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Issue: Transformer encoder by itself does not recognize word order. Back to slides ↩ Using positional encoding to re-inject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
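###Markdown
A quick way to check that the layer behaves as expected (a small sketch, assuming as above that token id 0 is padding): embed a toy padded batch and inspect the output shape and the propagated mask.
###Code
import tensorflow as tf

toy_layer = PositionalEmbedding(sequence_length=10, input_dim=100, output_dim=16)
toy_ids = tf.constant([[4, 7, 9, 0, 0, 0, 0, 0, 0, 0]])  # one sequence, zero-padded
embedded = toy_layer(toy_ids)
print(embedded.shape)                      # (1, 10, 16): token + position embeddings
print(toy_layer.compute_mask(toy_ids)[0])  # True for real tokens, False for padding
###Output
_____no_output_____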
###Markdown
Putting it all together: A text-classification Transformer **Combining the Transformer encoder with positional embedding**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-Head attention The Transformer encoder **Getting the data**
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed Layer**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Text classification model that combines the Transformer encoder and a pooling layer**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Using positional encoding to reinject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: a text-classification Transformer **Text classification model that combines positional embedding, the Transformer encoder, and a pooling layer**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-Head attention The Transformer encoder **Getting the data**
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.preprocessing.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.preprocessing.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.preprocessing.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
max_length = 600
max_tokens = 20000
text_vectorization = TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed Layer**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Text classification model that combines the Transformer encoder and a pooling layer**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Using positional encoding to reinject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: a text-classification Transformer **Text classification model that combines positional embedding, the Transformer encoder, and a pooling layer**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-head attention The Transformer encoder **Getting the data**
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed `Layer`**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Using the Transformer encoder for text classification**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Using positional encoding to re-inject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: A text-classification Transformer **Combining the Transformer encoder with positional embedding**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-Head attention The Transformer encoder **Getting the data**
###Code
import tensorflow as tf
if tf.test.gpu_device_name():
print('Default GPU Device:',tf.test.gpu_device_name())
else:
print("Please install GPU version of TF")
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("../dlkeras/aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.utils.text_dataset_from_directory(
"../dlkeras/aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"../dlkeras/aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"../dlkeras/aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
Found 24862 files belonging to 4 classes.
Found 3972 files belonging to 2 classes.
Found 25000 files belonging to 2 classes.
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed Layer**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Text classification model that combines the Transformer encoder and a pooling layer**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, None)] 0
_________________________________________________________________
embedding (Embedding) (None, None, 256) 5120000
_________________________________________________________________
transformer_encoder (Transfo (None, None, 256) 543776
_________________________________________________________________
global_max_pooling1d (Global (None, 256) 0
_________________________________________________________________
dropout (Dropout) (None, 256) 0
_________________________________________________________________
dense_2 (Dense) (None, 1) 257
=================================================================
Total params: 5,664,033
Trainable params: 5,664,033
Non-trainable params: 0
_________________________________________________________________
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
Epoch 1/20
Extension horovod.torch has not been built: /usr/local/lib/python3.8/site-packages/horovod/torch/mpi_lib/_mpi_lib.cpython-38-x86_64-linux-gnu.so not found
If this is not expected, reinstall Horovod with HOROVOD_WITH_PYTORCH=1 to debug the build error.
Warning! MPI libs are missing, but python applications are still avaiable.
[2022-02-16 15:00:14.238 tensorflow-2-6-gpu--ml-g4dn-xlarge-0201d392adbb6351f674da89d00b:70 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None
[2022-02-16 15:00:14.305 tensorflow-2-6-gpu--ml-g4dn-xlarge-0201d392adbb6351f674da89d00b:70 INFO profiler_config_parser.py:111] Unable to find config at /opt/ml/input/config/profilerconfig.json. Profiler is disabled.
777/777 [==============================] - 50s 57ms/step - loss: -831.0703 - accuracy: 0.4018 - val_loss: 1666.6791 - val_accuracy: 0.3706
Epoch 2/20
777/777 [==============================] - 44s 57ms/step - loss: -3769.3396 - accuracy: 0.4022 - val_loss: 4955.7417 - val_accuracy: 0.3706
Epoch 3/20
777/777 [==============================] - 45s 57ms/step - loss: -8653.6533 - accuracy: 0.4022 - val_loss: 9818.3574 - val_accuracy: 0.3706
Epoch 4/20
777/777 [==============================] - 45s 58ms/step - loss: -15493.8848 - accuracy: 0.4022 - val_loss: 16292.0654 - val_accuracy: 0.3706
Epoch 5/20
777/777 [==============================] - 46s 59ms/step - loss: -24314.1172 - accuracy: 0.4022 - val_loss: 24504.1719 - val_accuracy: 0.3706
Epoch 6/20
777/777 [==============================] - 45s 58ms/step - loss: -35113.2305 - accuracy: 0.4022 - val_loss: 34248.4492 - val_accuracy: 0.3706
Epoch 7/20
777/777 [==============================] - 44s 57ms/step - loss: -47832.5469 - accuracy: 0.4022 - val_loss: 45647.3789 - val_accuracy: 0.3706
Epoch 8/20
777/777 [==============================] - 44s 57ms/step - loss: -62644.5352 - accuracy: 0.4022 - val_loss: 58619.9570 - val_accuracy: 0.3706
Epoch 9/20
777/777 [==============================] - 45s 57ms/step - loss: -79338.5078 - accuracy: 0.4022 - val_loss: 73273.8672 - val_accuracy: 0.3706
Epoch 10/20
777/777 [==============================] - 45s 58ms/step - loss: -98068.5312 - accuracy: 0.4022 - val_loss: 89717.8750 - val_accuracy: 0.3706
Epoch 11/20
777/777 [==============================] - 46s 59ms/step - loss: -118929.1328 - accuracy: 0.4022 - val_loss: 107906.2500 - val_accuracy: 0.3706
Epoch 12/20
777/777 [==============================] - 46s 59ms/step - loss: -142163.0469 - accuracy: 0.4022 - val_loss: 127693.4219 - val_accuracy: 0.3706
Epoch 13/20
777/777 [==============================] - 45s 57ms/step - loss: -167401.0000 - accuracy: 0.4022 - val_loss: 150208.2500 - val_accuracy: 0.3706
Epoch 14/20
777/777 [==============================] - 45s 57ms/step - loss: -195372.1094 - accuracy: 0.4022 - val_loss: 174089.0625 - val_accuracy: 0.3706
Epoch 15/20
777/777 [==============================] - 45s 57ms/step - loss: -224879.7812 - accuracy: 0.4022 - val_loss: 199711.2500 - val_accuracy: 0.3706
Epoch 16/20
777/777 [==============================] - 45s 58ms/step - loss: -257012.5312 - accuracy: 0.4022 - val_loss: 226581.3906 - val_accuracy: 0.3706
Epoch 17/20
777/777 [==============================] - 45s 58ms/step - loss: -290681.6562 - accuracy: 0.4022 - val_loss: 255943.7812 - val_accuracy: 0.3706
Epoch 18/20
777/777 [==============================] - 46s 59ms/step - loss: -327153.4062 - accuracy: 0.4022 - val_loss: 286882.0625 - val_accuracy: 0.3706
Epoch 19/20
777/777 [==============================] - 46s 59ms/step - loss: -365059.1562 - accuracy: 0.4022 - val_loss: 319847.0625 - val_accuracy: 0.3706
Epoch 20/20
777/777 [==============================] - 45s 57ms/step - loss: -405135.6562 - accuracy: 0.4022 - val_loss: 354069.2500 - val_accuracy: 0.3706
782/782 [==============================] - 24s 30ms/step - loss: 1314.1091 - accuracy: 0.5000
Test acc: 0.500
###Markdown
Using positional encoding to reinject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: a text-classification Transformer **Text classification model that combines positional embedding, the Transformer encoder, and a pooling layer**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, None)] 0
_________________________________________________________________
positional_embedding (Positi (None, None, 256) 5273600
_________________________________________________________________
transformer_encoder_1 (Trans (None, None, 256) 543776
_________________________________________________________________
global_max_pooling1d_1 (Glob (None, 256) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 256) 0
_________________________________________________________________
dense_7 (Dense) (None, 1) 257
=================================================================
Total params: 5,817,633
Trainable params: 5,817,633
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
777/777 [==============================] - 47s 59ms/step - loss: -1616.1797 - accuracy: 0.4022 - val_loss: 3096.8364 - val_accuracy: 0.3706
Epoch 2/20
777/777 [==============================] - 46s 59ms/step - loss: -6844.6670 - accuracy: 0.4022 - val_loss: 8777.9707 - val_accuracy: 0.3706
Epoch 3/20
777/777 [==============================] - 46s 59ms/step - loss: -15195.5850 - accuracy: 0.4022 - val_loss: 17066.4805 - val_accuracy: 0.3706
Epoch 4/20
777/777 [==============================] - 47s 60ms/step - loss: -26717.5566 - accuracy: 0.4022 - val_loss: 28039.0215 - val_accuracy: 0.3706
Epoch 5/20
777/777 [==============================] - 46s 60ms/step - loss: -41325.4648 - accuracy: 0.4022 - val_loss: 41627.9102 - val_accuracy: 0.3706
Epoch 6/20
777/777 [==============================] - 45s 58ms/step - loss: -59213.5469 - accuracy: 0.4022 - val_loss: 57845.3594 - val_accuracy: 0.3706
Epoch 7/20
777/777 [==============================] - 45s 58ms/step - loss: -80150.7031 - accuracy: 0.4022 - val_loss: 76686.4531 - val_accuracy: 0.3706
Epoch 8/20
777/777 [==============================] - 46s 59ms/step - loss: -104311.3438 - accuracy: 0.4022 - val_loss: 98181.6250 - val_accuracy: 0.3706
Epoch 9/20
777/777 [==============================] - 46s 59ms/step - loss: -131529.3594 - accuracy: 0.4022 - val_loss: 122253.5234 - val_accuracy: 0.3706
Epoch 10/20
777/777 [==============================] - 46s 60ms/step - loss: -161854.7656 - accuracy: 0.4022 - val_loss: 148961.5781 - val_accuracy: 0.3706
Epoch 11/20
777/777 [==============================] - 47s 60ms/step - loss: -195082.8594 - accuracy: 0.4022 - val_loss: 178361.4062 - val_accuracy: 0.3706
Epoch 12/20
777/777 [==============================] - 46s 59ms/step - loss: -231895.1250 - accuracy: 0.4022 - val_loss: 210368.7031 - val_accuracy: 0.3706
Epoch 13/20
777/777 [==============================] - 45s 58ms/step - loss: -272062.1875 - accuracy: 0.4022 - val_loss: 244963.8594 - val_accuracy: 0.3706
Epoch 14/20
777/777 [==============================] - 46s 58ms/step - loss: -314642.1562 - accuracy: 0.4022 - val_loss: 282170.9375 - val_accuracy: 0.3706
Epoch 15/20
777/777 [==============================] - 46s 59ms/step - loss: -360785.5625 - accuracy: 0.4022 - val_loss: 322109.6875 - val_accuracy: 0.3706
Epoch 16/20
777/777 [==============================] - 46s 59ms/step - loss: -410018.7812 - accuracy: 0.4022 - val_loss: 364541.9375 - val_accuracy: 0.3706
Epoch 17/20
777/777 [==============================] - 47s 60ms/step - loss: -462661.5000 - accuracy: 0.4022 - val_loss: 409748.7188 - val_accuracy: 0.3706
Epoch 18/20
777/777 [==============================] - 46s 60ms/step - loss: -517708.3125 - accuracy: 0.4022 - val_loss: 457353.8750 - val_accuracy: 0.3706
Epoch 19/20
777/777 [==============================] - 45s 58ms/step - loss: -576801.1875 - accuracy: 0.4022 - val_loss: 507924.4688 - val_accuracy: 0.3706
Epoch 20/20
777/777 [==============================] - 45s 58ms/step - loss: -638402.8125 - accuracy: 0.4022 - val_loss: 560930.8125 - val_accuracy: 0.3706
782/782 [==============================] - 18s 23ms/step - loss: 2460.1792 - accuracy: 0.5000
Test acc: 0.500
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architectureTransformers are based on the simple mechanism of ***neural attention***. Understanding self-attentionThe idea behind "attention" is that some input features (e.g. some word tokens) are more important than others. The model pays more attention to some features and less to others.Two examples from other contexts are:- Max pooling in convnets: keep some features and discard the rest- TF-IDF normalization: assign importance scores to word tokens. Generalized self-attention: the query-key-value modelThe terminology **query**, **key**, and **value** comes from search algorithms. Suppose you want to query your collection of photos for "*dog on the beach*." Each of your photos has been annotated with some **keys**, depending on the objects in the picture [pic-1: beach, boat, tree; pic-2: beach, tree, dog; pic-3: dog]  Based on the keys, we can assess to what extent each picture matches the original query ("*dog on the beach*"), which has two word vectors (dog and beach). This is done by assigning a score to each picture in the collection depending on the closeness of the match. These scores are the **values**, as shown in the image in the cell above. For example, the top and bottom pictures have a score of 0.5 because only one word vector from the query matches their keys. The middle picture has a score of 1.0 because both word vectors match its keys. This is indeed a picture of a *dog on the beach*. Multi-head attention- The term "multi-head" implies that the output space of the self-attention layer is factored into a set of independent **sub-spaces**, learned separately. Each sub-space is called a **head**.- This is similar in principle to how *depthwise separable convolutions* work, where the output space of the convolution is factored into many sub-spaces: *one for each channel* (RGB, for example). - Each sub-space is learned separately. That is, the initial query, key, and value are sent through 3 independent sets of dense projections, resulting in 3 sets of modulated queries, keys, and values (see the graphic below). Each set is processed through neural attention, and the three outputs are concatenated back together into a single output. -  The Transformer encoder  **Getting the data**We download the IMDB data and unzip it. We also remove the unneeded 'unsup' (unsupervised) directory with the !rm -r command.
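To make the query-key-value scoring concrete, here is a minimal NumPy sketch of scaled dot-product self-attention. It is not the book's code; the sizes and random inputs are made up purely for illustration.

```python
import numpy as np

def scaled_dot_product_attention(query, keys, values):
    # Relevance score of every query token against every key.
    scores = query @ keys.T / np.sqrt(keys.shape[-1])
    # Softmax turns the scores into weights that sum to 1 for each query token.
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights /= weights.sum(axis=-1, keepdims=True)
    # Each output token is a weighted sum of the value vectors.
    return weights @ values

seq_len, embed_dim = 4, 8                       # made-up sizes
tokens = np.random.rand(seq_len, embed_dim)     # stand-in token embeddings
# In self-attention, query, keys, and values all come from the same sequence.
print(scaled_dot_product_attention(tokens, tokens, tokens).shape)  # (4, 8)
```

The `layers.MultiHeadAttention` layer used below runs several such attention operations in parallel (one per head), each over its own learned projection of the queries, keys, and values, and concatenates the results.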
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**The data preparation workflow below is similar to the workflow in book section 11.3.1** (Preparing the IMDB movie reviews data). We are just reusing it here (see notebook chapter11_part01_introduction). Also see notebook chapter11_part01_introduction for a `lambda` function toy example and the `map()` method, in the sub-section on *Processing words as a set: The bag-of-words approach*. **Preparing the data**1. We downloaded the data, which has the following directory structure: ```aclImdb/train/pos, aclImdb/train/neg, aclImdb/test/pos, aclImdb/test/neg``` 2. We want to create a **validation directory** along with subdirectories `val/pos` and `val/neg` - Split the training data in `train/pos` and `train/neg` 80:20 - Shuffle the data in both these subdirectories - Move 20% of the data from each training subdirectory to, respectively, `val/pos` and `val/neg` 3. Use the Keras utility `text_dataset_from_directory` to create a batched dataset of text files and their labels from a directory structure.
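As a quick refresher on the `lambda` + `map()` pattern mentioned above, here is a toy sketch on a made-up two-example `tf.data.Dataset`; the data is invented for illustration and is not part of the book's code.

```python
import tensorflow as tf

# A tiny dataset of (text, label) pairs -- made-up data for illustration only.
toy_ds = tf.data.Dataset.from_tensor_slices(
    (["good movie", "bad movie"], [1, 0]))

# map() with a lambda that returns only x drops the labels,
# which is what text_only_train_ds does below.
texts_only = toy_ds.map(lambda x, y: x)

# map() can also transform the text while keeping the (text, label) structure,
# which is how the vectorized datasets are built later.
upper_ds = toy_ds.map(lambda x, y: (tf.strings.upper(x), y))

for text in texts_only:
    print(text.numpy())   # b'good movie', then b'bad movie'
```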
###Code
import os, pathlib, shutil, random
from tensorflow import keras
# defining batch size
batch_size = 32
# Creating file path for validation directory
# and its subdirectories /pos and /neg
# adding the exist_ok=True argument to os.makedirs(), in case the directory already exists
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category, exist_ok=True) #adding 'exist_ok=True'
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
# Using the Keras utility text_dataset_from_directory to create batches of data
# of selected size for each of the /train, /val and /test directories.
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
# Using a lambda function with the map() method
# to keep only the raw text (x) and drop the labels (y)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed `Layer`**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Define the TransformerEncoder class, which inherits from layers.Layer
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Using the Transformer encoder for text classification**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Using positional encoding to re-inject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: A text-classification Transformer **Combining the Transformer encoder with positional embedding**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-Head attention The Transformer encoder **Getting the data**
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.preprocessing.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.preprocessing.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.preprocessing.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
max_length = 600
max_tokens = 20000
text_vectorization = TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(lambda x, y: (text_vectorization(x), y))
int_val_ds = val_ds.map(lambda x, y: (text_vectorization(x), y))
int_test_ds = test_ds.map(lambda x, y: (text_vectorization(x), y))
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed Layer**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super(TransformerEncoder, self).get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Text classification model that combines the Transformer encoder and a pooling layer**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Using positional encoding to reinject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super(PositionalEmbedding, self).get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: a text-classification Transformer **Text classification model that combines positional embedding, the Transformer encoder, and a pooling layer**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
The Transformer architecture Understanding self-attention self-attention -> can make features *context-aware* "I'll **see** you soon"? - I'll **see** this project to its end- I **see** what you mean smart embedding space -> provides a different vector representation for a word depending on the surrounding words https://github.com/eubinecto/k4ji_ai/issues/42 Generalized self-attention: the query-key-value model ```outputs = sum(input c * pairwise_scores(input a, input b))```input a = query input b = key input c = value For each element of the query, compute how relevant it is to every key, and use these scores to weight the sum of the values. Multi-head attention The Transformer encoder **Getting the data**
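Before moving on to the data, here is a rough NumPy sketch of the pseudocode above; the token vectors are random placeholders, not the book's code.

```python
import numpy as np

def self_attention(inputs):
    # Each row of `inputs` plays the role of query (input a), key (input b), and value (input c).
    outputs = np.zeros_like(inputs)
    for i, query in enumerate(inputs):
        # pairwise_scores: dot product of this query with every key.
        scores = inputs @ query / np.sqrt(inputs.shape[-1])
        weights = np.exp(scores - scores.max())
        weights /= weights.sum()
        # Weighted sum of the values.
        outputs[i] = weights @ inputs
    return outputs

tokens = np.random.rand(5, 16)           # 5 made-up token vectors of dimension 16
print(self_attention(tokens).shape)      # (5, 16)
```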
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 80.2M 100 80.2M 0 0 28.3M 0 0:00:02 0:00:02 --:--:-- 28.3M
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
Found 20000 files belonging to 2 classes.
Found 5000 files belonging to 2 classes.
Found 25000 files belonging to 2 classes.
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed `Layer`**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim # size of input token vectors
self.dense_dim = dense_dim # size of inner dense layer
self.num_heads = num_heads # number of attention heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Using the Transformer encoder for text classification**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x) ###
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, None)] 0
embedding_5 (Embedding) (None, None, 256) 5120000
transformer_encoder_2 (Tran (None, None, 256) 543776
sformerEncoder)
global_max_pooling1d_2 (Glo (None, 256) 0
balMaxPooling1D)
dropout_2 (Dropout) (None, 256) 0
dense_12 (Dense) (None, 1) 257
=================================================================
Total params: 5,664,033
Trainable params: 5,664,033
Non-trainable params: 0
_________________________________________________________________
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
Epoch 1/20
625/625 [==============================] - 49s 67ms/step - loss: 0.4922 - accuracy: 0.7697 - val_loss: 0.3240 - val_accuracy: 0.8620
Epoch 2/20
625/625 [==============================] - 42s 68ms/step - loss: 0.3197 - accuracy: 0.8623 - val_loss: 0.2767 - val_accuracy: 0.8854
Epoch 3/20
625/625 [==============================] - 42s 68ms/step - loss: 0.2483 - accuracy: 0.8992 - val_loss: 0.3854 - val_accuracy: 0.8578
Epoch 4/20
625/625 [==============================] - 43s 69ms/step - loss: 0.1933 - accuracy: 0.9245 - val_loss: 0.3755 - val_accuracy: 0.8716
Epoch 5/20
625/625 [==============================] - 43s 69ms/step - loss: 0.1581 - accuracy: 0.9401 - val_loss: 0.3854 - val_accuracy: 0.8620
Epoch 6/20
625/625 [==============================] - 42s 68ms/step - loss: 0.1330 - accuracy: 0.9496 - val_loss: 0.3645 - val_accuracy: 0.8896
Epoch 7/20
625/625 [==============================] - 44s 71ms/step - loss: 0.1148 - accuracy: 0.9571 - val_loss: 0.4519 - val_accuracy: 0.8842
Epoch 8/20
625/625 [==============================] - 45s 72ms/step - loss: 0.0987 - accuracy: 0.9634 - val_loss: 0.4820 - val_accuracy: 0.8878
Epoch 9/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0823 - accuracy: 0.9704 - val_loss: 0.4956 - val_accuracy: 0.8830
Epoch 10/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0678 - accuracy: 0.9746 - val_loss: 0.5375 - val_accuracy: 0.8814
Epoch 11/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0548 - accuracy: 0.9805 - val_loss: 0.6794 - val_accuracy: 0.8784
Epoch 12/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0458 - accuracy: 0.9836 - val_loss: 0.7270 - val_accuracy: 0.8518
Epoch 13/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0377 - accuracy: 0.9872 - val_loss: 0.8070 - val_accuracy: 0.8710
Epoch 14/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0310 - accuracy: 0.9901 - val_loss: 0.6353 - val_accuracy: 0.8714
Epoch 15/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0219 - accuracy: 0.9934 - val_loss: 0.9016 - val_accuracy: 0.8624
Epoch 16/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0177 - accuracy: 0.9948 - val_loss: 1.0087 - val_accuracy: 0.8600
Epoch 17/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0212 - accuracy: 0.9944 - val_loss: 0.7973 - val_accuracy: 0.8666
Epoch 18/20
625/625 [==============================] - 45s 72ms/step - loss: 0.0129 - accuracy: 0.9961 - val_loss: 1.3049 - val_accuracy: 0.8498
Epoch 19/20
625/625 [==============================] - 44s 70ms/step - loss: 0.0142 - accuracy: 0.9961 - val_loss: 1.1459 - val_accuracy: 0.8654
Epoch 20/20
625/625 [==============================] - 43s 69ms/step - loss: 0.0130 - accuracy: 0.9961 - val_loss: 1.3378 - val_accuracy: 0.8588
782/782 [==============================] - 21s 26ms/step - loss: 0.2967 - accuracy: 0.8741
Test acc: 0.874
###Markdown
Using positional encoding to re-inject order information: add word positions so the model can access order information. **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
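###Markdown
A quick check of the positional embedding (not from the book): each token id receives a learned token embedding plus a learned embedding of its position, and `compute_mask` flags the zero-padded positions so downstream layers can ignore them. The dummy batch below is an arbitrary example.
###Code
demo_embedding = PositionalEmbedding(sequence_length=600, input_dim=20000, output_dim=256)
dummy_ids = tf.constant([[5, 42, 7, 0, 0]])  # one sequence, zero-padded
print(demo_embedding(dummy_ids).shape)  # (1, 5, 256)
print(demo_embedding.compute_mask(dummy_ids).numpy())  # [[ True  True  True False False]]
###Output
_____no_output_____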
###Markdown
Putting it all together: A text-classification Transformer **Combining the Transformer encoder with positional embedding**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, None)] 0
positional_embedding (Posit (None, None, 256) 5273600
ionalEmbedding)
transformer_encoder_1 (Tran (None, None, 256) 543776
sformerEncoder)
global_max_pooling1d_1 (Glo (None, 256) 0
balMaxPooling1D)
dropout_1 (Dropout) (None, 256) 0
dense_7 (Dense) (None, 1) 257
=================================================================
Total params: 5,817,633
Trainable params: 5,817,633
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
625/625 [==============================] - 47s 72ms/step - loss: 0.4724 - accuracy: 0.7902 - val_loss: 0.2672 - val_accuracy: 0.8856
Epoch 2/20
625/625 [==============================] - 44s 70ms/step - loss: 0.2343 - accuracy: 0.9091 - val_loss: 0.2869 - val_accuracy: 0.8900
Epoch 3/20
625/625 [==============================] - 46s 73ms/step - loss: 0.1777 - accuracy: 0.9335 - val_loss: 0.3402 - val_accuracy: 0.8878
Epoch 4/20
625/625 [==============================] - 45s 71ms/step - loss: 0.1454 - accuracy: 0.9467 - val_loss: 0.3685 - val_accuracy: 0.8918
Epoch 5/20
625/625 [==============================] - 45s 71ms/step - loss: 0.1188 - accuracy: 0.9574 - val_loss: 0.3782 - val_accuracy: 0.8808
Epoch 6/20
625/625 [==============================] - 45s 71ms/step - loss: 0.1057 - accuracy: 0.9623 - val_loss: 0.3874 - val_accuracy: 0.8940
Epoch 7/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0912 - accuracy: 0.9676 - val_loss: 0.4408 - val_accuracy: 0.8858
Epoch 8/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0813 - accuracy: 0.9712 - val_loss: 0.4546 - val_accuracy: 0.8846
Epoch 9/20
625/625 [==============================] - 44s 71ms/step - loss: 0.0693 - accuracy: 0.9766 - val_loss: 0.5208 - val_accuracy: 0.8838
Epoch 10/20
625/625 [==============================] - 44s 71ms/step - loss: 0.0620 - accuracy: 0.9798 - val_loss: 0.5897 - val_accuracy: 0.8710
Epoch 11/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0576 - accuracy: 0.9807 - val_loss: 0.5790 - val_accuracy: 0.8826
Epoch 12/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0480 - accuracy: 0.9840 - val_loss: 0.6971 - val_accuracy: 0.8458
Epoch 13/20
625/625 [==============================] - 46s 73ms/step - loss: 0.0399 - accuracy: 0.9868 - val_loss: 0.5955 - val_accuracy: 0.8826
Epoch 14/20
625/625 [==============================] - 44s 69ms/step - loss: 0.0370 - accuracy: 0.9888 - val_loss: 0.5647 - val_accuracy: 0.8798
Epoch 15/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0308 - accuracy: 0.9905 - val_loss: 0.8191 - val_accuracy: 0.8748
Epoch 16/20
625/625 [==============================] - 44s 69ms/step - loss: 0.0283 - accuracy: 0.9909 - val_loss: 0.7610 - val_accuracy: 0.8726
Epoch 17/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0234 - accuracy: 0.9924 - val_loss: 0.7302 - val_accuracy: 0.8766
Epoch 18/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0228 - accuracy: 0.9927 - val_loss: 0.7670 - val_accuracy: 0.8774
Epoch 19/20
625/625 [==============================] - 45s 71ms/step - loss: 0.0215 - accuracy: 0.9934 - val_loss: 0.9392 - val_accuracy: 0.8714
Epoch 20/20
625/625 [==============================] - 45s 72ms/step - loss: 0.0137 - accuracy: 0.9958 - val_loss: 1.3506 - val_accuracy: 0.8364
782/782 [==============================] - 21s 27ms/step - loss: 0.2883 - accuracy: 0.8806
Test acc: 0.881
###Markdown
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=ml-ninja&a_cid=11111111&chan=c2). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**This notebook was generated for TensorFlow 2.6. The Transformer architecture Understanding self-attention Generalized self-attention: the query-key-value model Multi-head attention The Transformer encoder **Getting the data**
###Code
!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xf aclImdb_v1.tar.gz
!rm -r aclImdb/train/unsup
###Output
_____no_output_____
###Markdown
**Preparing the data**
###Code
import os, pathlib, shutil, random
from tensorflow import keras
batch_size = 32
base_dir = pathlib.Path("aclImdb")
val_dir = base_dir / "val"
train_dir = base_dir / "train"
for category in ("neg", "pos"):
os.makedirs(val_dir / category)
files = os.listdir(train_dir / category)
random.Random(1337).shuffle(files)
num_val_samples = int(0.2 * len(files))
val_files = files[-num_val_samples:]
for fname in val_files:
shutil.move(train_dir / category / fname,
val_dir / category / fname)
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train", batch_size=batch_size
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/val", batch_size=batch_size
)
test_ds = keras.utils.text_dataset_from_directory(
"aclImdb/test", batch_size=batch_size
)
text_only_train_ds = train_ds.map(lambda x, y: x)
###Output
_____no_output_____
###Markdown
**Vectorizing the data**
###Code
from tensorflow.keras import layers
max_length = 600
max_tokens = 20000
text_vectorization = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_length,
)
text_vectorization.adapt(text_only_train_ds)
int_train_ds = train_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_val_ds = val_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
int_test_ds = test_ds.map(
lambda x, y: (text_vectorization(x), y),
num_parallel_calls=4)
###Output
_____no_output_____
###Markdown
**Transformer encoder implemented as a subclassed `Layer`**
###Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential(
[layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(
inputs, inputs, attention_mask=mask)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim,
})
return config
###Output
_____no_output_____
###Markdown
**Using the Transformer encoder for text classification**
###Code
vocab_size = 20000
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
###Output
_____no_output_____
###Markdown
**Training and evaluating the Transformer encoder based model**
###Code
callbacks = [
keras.callbacks.ModelCheckpoint("transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____
###Markdown
Using positional encoding to re-inject order information **Implementing positional embedding as a subclassed layer**
###Code
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=input_dim, output_dim=output_dim)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
###Output
_____no_output_____
###Markdown
Putting it all together: A text-classification Transformer **Combining the Transformer encoder with positional embedding**
###Code
vocab_size = 20000
sequence_length = 600
embed_dim = 256
num_heads = 2
dense_dim = 32
inputs = keras.Input(shape=(None,), dtype="int64")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(inputs)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.summary()
callbacks = [
keras.callbacks.ModelCheckpoint("full_transformer_encoder.keras",
save_best_only=True)
]
model.fit(int_train_ds, validation_data=int_val_ds, epochs=20, callbacks=callbacks)
model = keras.models.load_model(
"full_transformer_encoder.keras",
custom_objects={"TransformerEncoder": TransformerEncoder,
"PositionalEmbedding": PositionalEmbedding})
print(f"Test acc: {model.evaluate(int_test_ds)[1]:.3f}")
###Output
_____no_output_____ |
openfl-tutorials/interactive_api_tutorials_(experimental)/Pytorch_Kvasir_UNET_workspace/new_python_api_UNET.ipynb | ###Markdown
Federated PyTorch UNET Tutorial Using low-level Python API
###Code
# Install dependencies if not already installed
!pip install torchvision
!pip install scikit-image
###Output
_____no_output_____
###Markdown
Describe the model and optimizer
###Code
import torch
import torch.nn as nn
import torch.optim as optim
"""
UNet model definition
"""
from layers import soft_dice_coef, soft_dice_loss, double_conv, down, up
class UNet(nn.Module):
def __init__(self, n_channels=3, n_classes=1):
super().__init__()
self.inc = double_conv(n_channels, 64)
self.down1 = down(64, 128)
self.down2 = down(128, 256)
self.down3 = down(256, 512)
self.down4 = down(512, 1024)
self.up1 = up(1024, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.outc = nn.Conv2d(64, n_classes, 1)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
x = torch.sigmoid(x)
return x
model_unet = UNet()
optimizer_adam = optim.Adam(model_unet.parameters(), lr=1e-4)
###Output
_____no_output_____
###Markdown
Prepare data We ask the user to keep all the test data in the `data/` folder under the workspace, as it will not be sent to collaborators
###Code
import os
from hashlib import sha384
import PIL
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as tsf
from skimage import io
os.makedirs('data', exist_ok=True)
!wget -nc 'https://datasets.simula.no/hyper-kvasir/hyper-kvasir-segmented-images.zip' -O ./data/kvasir.zip
ZIP_SHA384 = 'e30d18a772c6520476e55b610a4db457237f151e'\
'19182849d54b49ae24699881c1e18e0961f77642be900450ef8b22e7'
assert sha384(open('./data/kvasir.zip', 'rb').read(
os.path.getsize('./data/kvasir.zip'))).hexdigest() == ZIP_SHA384
!unzip -n ./data/kvasir.zip -d ./data
DATA_PATH = './data/segmented-images/'
import numpy as np
def read_data(image_path, mask_path):
"""
Read image and mask from disk.
"""
img = io.imread(image_path)
assert(img.shape[2] == 3)
mask = io.imread(mask_path)
return (img, mask[:, :, 0].astype(np.uint8))
class KvasirDataset(Dataset):
"""
Kvasir dataset contains 1000 images for all collaborators.
Args:
data_path: path to dataset on disk
collaborator_count: total number of collaborators
collaborator_num: number of current collaborator
is_validation: validation option
"""
def __init__(self, images_path = './data/segmented-images/images/', \
masks_path = './data/segmented-images/masks/',
validation_fraction=1/8, is_validation=False):
self.images_path = images_path
self.masks_path = masks_path
self.images_names = [img_name for img_name in sorted(os.listdir(
self.images_path)) if len(img_name) > 3 and img_name[-3:] == 'jpg']
assert(len(self.images_names) > 2), "Too few images"
validation_size = max(1, int(len(self.images_names) * validation_fraction))
if is_validation:
self.images_names = self.images_names[-validation_size :]
else:
self.images_names = self.images_names[: -validation_size]
# Prepare transforms
self.img_trans = tsf.Compose([
tsf.ToPILImage(),
tsf.Resize((332, 332)),
tsf.ToTensor(),
tsf.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
self.mask_trans = tsf.Compose([
tsf.ToPILImage(),
tsf.Resize((332, 332), interpolation=PIL.Image.NEAREST),
tsf.ToTensor()])
def __getitem__(self, index):
name = self.images_names[index]
img, mask = read_data(self.images_path + name, self.masks_path + name)
img = self.img_trans(img).numpy()
mask = self.mask_trans(mask).numpy()
return img, mask
def __len__(self):
return len(self.images_names)
###Output
_____no_output_____
###Markdown
Define Federated Learning tasks
###Code
def train(unet_model, train_loader, optimizer, device, loss_fn=soft_dice_loss):
function_defined_in_notebook()
unet_model.train()
unet_model.to(device)
losses = []
for data, target in train_loader:
data, target = torch.tensor(data).to(device), torch.tensor(
target).to(device, dtype=torch.float32)
optimizer.zero_grad()
output = unet_model(data)
loss = loss_fn(output=output, target=target)
loss.backward()
optimizer.step()
losses.append(loss.detach().cpu().numpy())
return {'train_loss': np.mean(losses),}
def validate(unet_model, val_loader, device):
unet_model.eval()
unet_model.to(device)
val_score = 0
total_samples = 0
with torch.no_grad():
for data, target in val_loader:
samples = target.shape[0]
total_samples += samples
data, target = torch.tensor(data).to(device), \
torch.tensor(target).to(device, dtype=torch.int64)
output = unet_model(data)
val = soft_dice_coef(output, target)
val_score += val.sum().cpu().numpy()
return {'dice_coef': val_score / total_samples,}
###Output
_____no_output_____
###Markdown
Describing FL experiment
###Code
from openfl.interface.interactive_api.experiment import TaskInterface, DataInterface, ModelInterface, FLExperiment
###Output
_____no_output_____
###Markdown
Register model
###Code
from copy import deepcopy
framework_adapter = 'openfl.plugins.frameworks_adapters.pytorch_adapter.FrameworkAdapterPlugin'
MI = ModelInterface(model=model_unet, optimizer=optimizer_adam, framework_plugin=framework_adapter)
# Save the initial model state
initial_model = deepcopy(model_unet)
###Output
_____no_output_____
###Markdown
Register dataset We extract the user dataset class implementation. Is it convenient? What if the dataset is not a class?
###Code
class UserDataset:
def __init__(self, path_to_local_data):
print(f'User Dataset initialized with {path_to_local_data}')
class OpenflMixin:
def _delayed_init(self):
raise NotImplementedError
class FedDataset(OpenflMixin):
def __init__(self, UserDataset):
self.user_dataset_class = UserDataset
print('We implement all abstract methods from mixin in this class')
def _delayed_init(self, data_path):
print('This method is called on the collaborator node')
dataset_obj = self.user_dataset_class(data_path)
fed_dataset = FedDataset(UserDataset)
fed_dataset._delayed_init('data path on the collaborator node')
class FedDataset(DataInterface):
def __init__(self, UserDatasetClass, **kwargs):
self.UserDatasetClass = UserDatasetClass
self.kwargs = kwargs
def _delayed_init(self, data_path='1,1'):
# With the next command the local dataset will be loaded on the collaborator node
# For this example we have the same dataset on the same path, and we will shard it
# So we use `data_path` information for this purpose.
self.rank, self.world_size = [int(part) for part in data_path.split(',')]
validation_fraction=1/8
self.train_set = self.UserDatasetClass(validation_fraction=validation_fraction, is_validation=False)
self.valid_set = self.UserDatasetClass(validation_fraction=validation_fraction, is_validation=True)
# Do the actual sharding
self._do_sharding( self.rank, self.world_size)
def _do_sharding(self, rank, world_size):
# This method relies on the dataset's implementation
# i.e. coupled in a bad way
self.train_set.images_names = self.train_set.images_names[ rank-1 :: world_size ]
def get_train_loader(self, **kwargs):
"""
Output of this method will be provided to tasks with optimizer in contract
"""
return DataLoader(
self.train_set, num_workers=8, batch_size=self.kwargs['train_bs'], shuffle=True
)
def get_valid_loader(self, **kwargs):
"""
Output of this method will be provided to tasks without optimizer in contract
"""
return DataLoader(self.valid_set, num_workers=8, batch_size=self.kwargs['valid_bs'])
def get_train_data_size(self):
"""
Information for aggregation
"""
return len(self.train_set)
def get_valid_data_size(self):
"""
Information for aggregation
"""
return len(self.valid_set)
fed_dataset = FedDataset(KvasirDataset, train_bs=8, valid_bs=8)
###Output
_____no_output_____
###Markdown
Register tasks
###Code
TI = TaskInterface()
import torch
import tqdm
# The Interactive API supports registering functions defined in the main module or imported.
def function_defined_in_notebook(some_parameter):
print(f'Also I accept a parameter and it is {some_parameter}')
# Task interface currently supports only standalone functions.
@TI.add_kwargs(**{'some_parameter': 42})
@TI.register_fl_task(model='unet_model', data_loader='train_loader', \
device='device', optimizer='optimizer')
def train(unet_model, train_loader, optimizer, device, loss_fn=soft_dice_loss, some_parameter=None):
if not torch.cuda.is_available():
device = 'cpu'
function_defined_in_notebook(some_parameter)
train_loader = tqdm.tqdm(train_loader, desc="train")
unet_model.train()
unet_model.to(device)
losses = []
for data, target in train_loader:
data, target = torch.tensor(data).to(device), torch.tensor(
target).to(device, dtype=torch.float32)
optimizer.zero_grad()
output = unet_model(data)
loss = loss_fn(output=output, target=target)
loss.backward()
optimizer.step()
losses.append(loss.detach().cpu().numpy())
return {'train_loss': np.mean(losses),}
@TI.register_fl_task(model='unet_model', data_loader='val_loader', device='device')
def validate(unet_model, val_loader, device):
unet_model.eval()
unet_model.to(device)
val_loader = tqdm.tqdm(val_loader, desc="validate")
val_score = 0
total_samples = 0
with torch.no_grad():
for data, target in val_loader:
samples = target.shape[0]
total_samples += samples
data, target = torch.tensor(data).to(device), \
torch.tensor(target).to(device, dtype=torch.int64)
output = unet_model(data)
val = soft_dice_coef(output, target)
val_score += val.sum().cpu().numpy()
return {'dice_coef': val_score / total_samples,}
###Output
_____no_output_____
###Markdown
Time to start a federated learning experiment
###Code
# Create a federation
from openfl.interface.interactive_api.federation import Federation
# 1) Run with aggregator-collaborator mTLS
# If the user wants to enable mTLS, they must provide the CA root chain and a signed key pair to the federation interface
cert_chain = 'cert/cert_chain.crt'
agg_certificate = 'cert/agg_certificate.crt'
agg_private_key = 'cert/agg_private.key'
federation = Federation(central_node_fqdn='some.fqdn', disable_tls=False,
cert_chain=cert_chain, agg_certificate=agg_certificate, agg_private_key=agg_private_key)
col_data_paths = {'one': '1,1',}
federation.register_collaborators(col_data_paths=col_data_paths)
# --------------------------------------------------------------------------------------------------------------------
# 2) Run with TLS disabled (trusted environment)
# Federation can also determine local fqdn automatically
federation = Federation(central_node_fqdn='localhost', disable_tls=True)
# The first number, which is a collaborator's rank, may also be passed as a CUDA device identifier
col_data_paths = {'one': '1,2',
'two': '2,2'}
federation.register_collaborators(col_data_paths=col_data_paths)
###Output
_____no_output_____
###Markdown
Certification of an aggregator* fx workspace certify: creates the cert folder and CA as well as the cert_chain* fx aggregator generate-cert-request --fqdn `FQDN`: you can pass a specific aggregator FQDN if you want* fx aggregator certify --fqdn `FQDN` --silent: signs the aggregator's cert After that, just pass the paths to the required certs to the Federation API Certification of a collaborator Just follow the usual procedure: fx collaborator generate-cert-request -d {DATA_PATH} -n {COL} fx collaborator certify --request-pkg {COL_DIRECTORY}/{FED_WORKSPACE}/col_{COL}_to_agg_cert_request.zip fx collaborator certify --import {FED_DIRECTORY}/agg_to_col_{COL}_signed_cert.zip
###Code
# Create an experiment in the federation
fl_experiment = FLExperiment(federation=federation,)
# If I use autoreload, I get a pickling error
# The following command zips the workspace and python requirements to be transferred to collaborator nodes
fl_experiment.prepare_workspace_distribution(model_provider=MI, task_keeper=TI, data_loader=fed_dataset, rounds_to_train=7, \
opt_treatment='CONTINUE_GLOBAL')
# This command starts the aggregator server
fl_experiment.start_experiment(model_provider=MI)
# When the aggregator server blocks the notebook one can start collaborators
# For the test run, just type the console command from the workspace directory:
# `fx collaborator start -d data.yaml -n {col_name}` for all collaborators
# For the distributed experiment transfer zipped workspace to the collaborator nodes and run
# `fx workspace import --archive {workspace_name}.zip` cd to the workspace and start collaborators
###Output
_____no_output_____
###Markdown
Now we validate the best model!
###Code
best_model = fl_experiment.get_best_model()
fed_dataset._delayed_init()
# Validating initial model
validate(initial_model, fed_dataset.get_valid_loader(), 'cpu')
# Validating trained model
validate(best_model, fed_dataset.get_valid_loader(), 'cpu')
###Output
_____no_output_____
###Markdown
We can tune the model further!
###Code
MI = ModelInterface(model=best_model, optimizer=optimizer_adam, framework_plugin=framework_adapter)
fl_experiment.start_experiment(model_provider=MI, task_keeper=TI, data_loader=fed_dataset, rounds_to_train=4, \
opt_treatment='CONTINUE_GLOBAL')
best_model = fl_experiment.get_best_model()
# Validating trained model
validate(best_model, fed_dataset.get_valid_loader(), 'cpu')
###Output
_____no_output_____
###Markdown
Federated PyTorch UNET Tutorial Using low-level Python API
###Code
# Install dependencies if not already installed
!pip install torchvision==0.8.1
!pip install scikit-image
###Output
_____no_output_____
###Markdown
Describe the model and optimizer
###Code
import torch
import torch.nn as nn
import torch.optim as optim
"""
UNet model definition
"""
from layers import soft_dice_coef, soft_dice_loss, DoubleConv, Down, Up
class UNet(nn.Module):
def __init__(self, n_channels=3, n_classes=1):
super().__init__()
self.inc = DoubleConv(n_channels, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256, 512)
self.down4 = Down(512, 1024)
self.up1 = Up(1024, 512)
self.up2 = Up(512, 256)
self.up3 = Up(256, 128)
self.up4 = Up(128, 64)
self.outc = nn.Conv2d(64, n_classes, 1)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
x = torch.sigmoid(x)
return x
model_unet = UNet()
optimizer_adam = optim.Adam(model_unet.parameters(), lr=1e-4)
###Output
_____no_output_____
###Markdown
Prepare data We ask the user to keep all the test data in the `data/` folder under the workspace, as it will not be sent to collaborators
###Code
import os
from hashlib import sha384
import PIL
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as tsf
from skimage import io
os.makedirs('data', exist_ok=True)
!wget -nc 'https://datasets.simula.no/hyper-kvasir/hyper-kvasir-segmented-images.zip' -O ./data/kvasir.zip
ZIP_SHA384 = 'e30d18a772c6520476e55b610a4db457237f151e'\
'19182849d54b49ae24699881c1e18e0961f77642be900450ef8b22e7'
assert sha384(open('./data/kvasir.zip', 'rb').read(
os.path.getsize('./data/kvasir.zip'))).hexdigest() == ZIP_SHA384
!unzip -n ./data/kvasir.zip -d ./data
DATA_PATH = './data/segmented-images/'
import numpy as np
def read_data(image_path, mask_path):
"""
Read image and mask from disk.
"""
img = io.imread(image_path)
assert(img.shape[2] == 3)
mask = io.imread(mask_path)
return (img, mask[:, :, 0].astype(np.uint8))
class KvasirDataset(Dataset):
"""
Kvasir dataset contains 1000 images for all collaborators.
Args:
data_path: path to dataset on disk
collaborator_count: total number of collaborators
collaborator_num: number of current collaborator
is_validation: validation option
"""
def __init__(self, images_path = './data/segmented-images/images/', \
masks_path = './data/segmented-images/masks/',
validation_fraction=1/8, is_validation=False):
self.images_path = images_path
self.masks_path = masks_path
self.images_names = [img_name for img_name in sorted(os.listdir(
self.images_path)) if len(img_name) > 3 and img_name[-3:] == 'jpg']
assert(len(self.images_names) > 2), "Too few images"
validation_size = max(1, int(len(self.images_names) * validation_fraction))
if is_validation:
self.images_names = self.images_names[-validation_size :]
else:
self.images_names = self.images_names[: -validation_size]
# Prepare transforms
self.img_trans = tsf.Compose([
tsf.ToPILImage(),
tsf.Resize((332, 332)),
tsf.ToTensor(),
tsf.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
self.mask_trans = tsf.Compose([
tsf.ToPILImage(),
tsf.Resize((332, 332), interpolation=PIL.Image.NEAREST),
tsf.ToTensor()])
def __getitem__(self, index):
name = self.images_names[index]
img, mask = read_data(self.images_path + name, self.masks_path + name)
img = self.img_trans(img).numpy()
mask = self.mask_trans(mask).numpy()
return img, mask
def __len__(self):
return len(self.images_names)
###Output
_____no_output_____
###Markdown
Define Federated Learning tasks
###Code
def train(unet_model, train_loader, optimizer, device, loss_fn=soft_dice_loss):
function_defined_in_notebook()
unet_model.train()
unet_model.to(device)
losses = []
for data, target in train_loader:
data, target = torch.tensor(data).to(device), torch.tensor(
target).to(device, dtype=torch.float32)
optimizer.zero_grad()
output = unet_model(data)
loss = loss_fn(output=output, target=target)
loss.backward()
optimizer.step()
losses.append(loss.detach().cpu().numpy())
return {'train_loss': np.mean(losses),}
def validate(unet_model, val_loader, device):
unet_model.eval()
unet_model.to(device)
val_score = 0
total_samples = 0
with torch.no_grad():
for data, target in val_loader:
samples = target.shape[0]
total_samples += samples
data, target = torch.tensor(data).to(device), \
torch.tensor(target).to(device, dtype=torch.int64)
output = unet_model(data)
val = soft_dice_coef(output, target)
val_score += val.sum().cpu().numpy()
return {'dice_coef': val_score / total_samples,}
###Output
_____no_output_____
###Markdown
Describing FL experiment
###Code
from openfl.interface.interactive_api.experiment import TaskInterface, DataInterface, ModelInterface, FLExperiment
###Output
_____no_output_____
###Markdown
Register model
###Code
from copy import deepcopy
framework_adapter = 'openfl.plugins.frameworks_adapters.pytorch_adapter.FrameworkAdapterPlugin'
MI = ModelInterface(model=model_unet, optimizer=optimizer_adam, framework_plugin=framework_adapter)
# Save the initial model state
initial_model = deepcopy(model_unet)
###Output
_____no_output_____
###Markdown
Register dataset We extract the user dataset class implementation. Is it convenient? What if the dataset is not a class?
###Code
class UserDataset:
def __init__(self, path_to_local_data):
print(f'User Dataset initialized with {path_to_local_data}')
class OpenflMixin:
def _delayed_init(self):
raise NotImplementedError
class FedDataset(OpenflMixin):
def __init__(self, UserDataset):
self.user_dataset_class = UserDataset
print('We implement all abstract methods from mixin in this class')
def _delayed_init(self, data_path):
print('This method is called on the collaborator node')
dataset_obj = self.user_dataset_class(data_path)
fed_dataset = FedDataset(UserDataset)
fed_dataset._delayed_init('data path on the collaborator node')
class FedDataset(DataInterface):
def __init__(self, UserDatasetClass, **kwargs):
self.UserDatasetClass = UserDatasetClass
self.kwargs = kwargs
def _delayed_init(self, data_path='1,1'):
# With the next command the local dataset will be loaded on the collaborator node
# For this example we have the same dataset on the same path, and we will shard it
# So we use `data_path` information for this purpose.
self.rank, self.world_size = [int(part) for part in data_path.split(',')]
validation_fraction=1/8
self.train_set = self.UserDatasetClass(validation_fraction=validation_fraction, is_validation=False)
self.valid_set = self.UserDatasetClass(validation_fraction=validation_fraction, is_validation=True)
# Do the actual sharding
self._do_sharding( self.rank, self.world_size)
def _do_sharding(self, rank, world_size):
# This method relies on the dataset's implementation
# i.e. coupled in a bad way
self.train_set.images_names = self.train_set.images_names[ rank-1 :: world_size ]
def get_train_loader(self, **kwargs):
"""
Output of this method will be provided to tasks with optimizer in contract
"""
return DataLoader(
self.train_set, num_workers=8, batch_size=self.kwargs['train_bs'], shuffle=True
)
def get_valid_loader(self, **kwargs):
"""
Output of this method will be provided to tasks without optimizer in contract
"""
return DataLoader(self.valid_set, num_workers=8, batch_size=self.kwargs['valid_bs'])
def get_train_data_size(self):
"""
Information for aggregation
"""
return len(self.train_set)
def get_valid_data_size(self):
"""
Information for aggregation
"""
return len(self.valid_set)
fed_dataset = FedDataset(KvasirDataset, train_bs=8, valid_bs=8)
###Output
_____no_output_____
###Markdown
Register tasks
###Code
TI = TaskInterface()
import torch
import tqdm
# The Interactive API supports registering functions defined in the main module or imported.
def function_defined_in_notebook(some_parameter):
print(f'Also I accept a parameter and it is {some_parameter}')
# Task interface currently supports only standalone functions.
@TI.add_kwargs(**{'some_parameter': 42})
@TI.register_fl_task(model='unet_model', data_loader='train_loader', \
device='device', optimizer='optimizer')
def train(unet_model, train_loader, optimizer, device, loss_fn=soft_dice_loss, some_parameter=None):
if not torch.cuda.is_available():
device = 'cpu'
function_defined_in_notebook(some_parameter)
train_loader = tqdm.tqdm(train_loader, desc="train")
unet_model.train()
unet_model.to(device)
losses = []
for data, target in train_loader:
data, target = torch.tensor(data).to(device), torch.tensor(
target).to(device, dtype=torch.float32)
optimizer.zero_grad()
output = unet_model(data)
loss = loss_fn(output=output, target=target)
loss.backward()
optimizer.step()
losses.append(loss.detach().cpu().numpy())
return {'train_loss': np.mean(losses),}
@TI.register_fl_task(model='unet_model', data_loader='val_loader', device='device')
def validate(unet_model, val_loader, device):
unet_model.eval()
unet_model.to(device)
val_loader = tqdm.tqdm(val_loader, desc="validate")
val_score = 0
total_samples = 0
with torch.no_grad():
for data, target in val_loader:
samples = target.shape[0]
total_samples += samples
data, target = torch.tensor(data).to(device), \
torch.tensor(target).to(device, dtype=torch.int64)
output = unet_model(data)
val = soft_dice_coef(output, target)
val_score += val.sum().cpu().numpy()
return {'dice_coef': val_score / total_samples,}
###Output
_____no_output_____
###Markdown
Time to start a federated learning experiment
###Code
# Create a federation
from openfl.interface.interactive_api.federation import Federation
# 1) Run with aggregator-collaborator mTLS
# If the user wants to enable mTLS, they must provide the CA root chain and a signed key pair to the federation interface
cert_chain = 'cert/cert_chain.crt'
agg_certificate = 'cert/agg_certificate.crt'
agg_private_key = 'cert/agg_private.key'
federation = Federation(central_node_fqdn='some.fqdn', disable_tls=False,
cert_chain=cert_chain, agg_certificate=agg_certificate, agg_private_key=agg_private_key)
col_data_paths = {'one': '1,1',}
federation.register_collaborators(col_data_paths=col_data_paths)
# --------------------------------------------------------------------------------------------------------------------
# 2) Run with TLS disabled (trusted environment)
# Federation can also determine local fqdn automatically
federation = Federation(central_node_fqdn='localhost', disable_tls=True)
# The first number, which is a collaborator's rank, may also be passed as a CUDA device identifier
col_data_paths = {'one': '1,2',
'two': '2,2'}
federation.register_collaborators(col_data_paths=col_data_paths)
###Output
_____no_output_____
###Markdown
Certification of an aggregator* fx workspace certify: creates the cert folder and CA as well as the cert_chain* fx aggregator generate-cert-request --fqdn `FQDN`: you can pass a specific aggregator FQDN if you want* fx aggregator certify --fqdn `FQDN` --silent: signs the aggregator's cert After that, just pass the paths to the required certs to the Federation API Certification of a collaborator Just follow the usual procedure: fx collaborator generate-cert-request -d {DATA_PATH} -n {COL} fx collaborator certify --request-pkg {COL_DIRECTORY}/{FED_WORKSPACE}/col_{COL}_to_agg_cert_request.zip fx collaborator certify --import {FED_DIRECTORY}/agg_to_col_{COL}_signed_cert.zip
###Code
# Create an experiment in the federation
fl_experiment = FLExperiment(federation=federation,)
# If I use autoreload, I get a pickling error
# The following command zips the workspace and python requirements to be transferred to collaborator nodes
fl_experiment.prepare_workspace_distribution(model_provider=MI, task_keeper=TI, data_loader=fed_dataset, rounds_to_train=7, \
opt_treatment='CONTINUE_GLOBAL')
# This command starts the aggregator server
fl_experiment.start_experiment(model_provider=MI)
# When the aggregator server blocks the notebook one can start collaborators
# For the test run, just type the console command from the workspace directory:
# `fx collaborator start -d data.yaml -n {col_name}` for all collaborators
# For the distributed experiment transfer zipped workspace to the collaborator nodes and run
# `fx workspace import --archive {workspace_name}.zip` cd to the workspace and start collaborators
###Output
_____no_output_____
###Markdown
Now we validate the best model!
###Code
best_model = fl_experiment.get_best_model()
fed_dataset._delayed_init()
# Validating initial model
validate(initial_model, fed_dataset.get_valid_loader(), 'cpu')
# Validating trained model
validate(best_model, fed_dataset.get_valid_loader(), 'cpu')
###Output
_____no_output_____
###Markdown
We can tune the model further!
###Code
MI = ModelInterface(model=best_model, optimizer=optimizer_adam, framework_plugin=framework_adapter)
fl_experiment.start_experiment(model_provider=MI, task_keeper=TI, data_loader=fed_dataset, rounds_to_train=4, \
opt_treatment='CONTINUE_GLOBAL')
best_model = fl_experiment.get_best_model()
# Validating trained model
validate(best_model, fed_dataset.get_valid_loader(), 'cpu')
###Output
_____no_output_____ |
train_mix_model.ipynb | ###Markdown
Dara R Samii Login
###Code
from google.colab import drive
drive.mount("/content/drive",force_remount=True)
%cd /content/drive/MyDrive/DataDays2021/
pwd = %pwd
pwd
###Output
Mounted at /content/drive
/content/drive/MyDrive/DataDays2021
###Markdown
**-------------------------------------------------------------------------------------------------------------------------------** installing packages
###Code
!pip install "dask[complete]"
!pip install -Uqq fastai
!pip install parsivar
###Output
Requirement already satisfied: dask[complete] in /usr/local/lib/python3.7/dist-packages (2.12.0)
Collecting partd>=0.3.10
Downloading partd-1.2.0-py3-none-any.whl (19 kB)
Requirement already satisfied: bokeh>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from dask[complete]) (2.3.3)
Collecting distributed>=2.0
Downloading distributed-2021.8.0-py3-none-any.whl (776 kB)
[K |████████████████████████████████| 776 kB 7.5 MB/s
[?25hRequirement already satisfied: toolz>=0.7.3 in /usr/local/lib/python3.7/dist-packages (from dask[complete]) (0.11.1)
Requirement already satisfied: PyYaml in /usr/local/lib/python3.7/dist-packages (from dask[complete]) (3.13)
Requirement already satisfied: numpy>=1.13.0 in /usr/local/lib/python3.7/dist-packages (from dask[complete]) (1.19.5)
Collecting fsspec>=0.6.0
Downloading fsspec-2021.7.0-py3-none-any.whl (118 kB)
[K |████████████████████████████████| 118 kB 68.3 MB/s
[?25hRequirement already satisfied: pandas>=0.23.0 in /usr/local/lib/python3.7/dist-packages (from dask[complete]) (1.1.5)
Requirement already satisfied: cloudpickle>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from dask[complete]) (1.3.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from bokeh>=1.0.0->dask[complete]) (2.8.2)
Requirement already satisfied: typing-extensions>=3.7.4 in /usr/local/lib/python3.7/dist-packages (from bokeh>=1.0.0->dask[complete]) (3.7.4.3)
Requirement already satisfied: Jinja2>=2.9 in /usr/local/lib/python3.7/dist-packages (from bokeh>=1.0.0->dask[complete]) (2.11.3)
Requirement already satisfied: tornado>=5.1 in /usr/local/lib/python3.7/dist-packages (from bokeh>=1.0.0->dask[complete]) (5.1.1)
Requirement already satisfied: pillow>=7.1.0 in /usr/local/lib/python3.7/dist-packages (from bokeh>=1.0.0->dask[complete]) (7.1.2)
Requirement already satisfied: packaging>=16.8 in /usr/local/lib/python3.7/dist-packages (from bokeh>=1.0.0->dask[complete]) (21.0)
Requirement already satisfied: msgpack>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (1.0.2)
Requirement already satisfied: click>=6.6 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (7.1.2)
Requirement already satisfied: tblib>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (1.7.0)
Requirement already satisfied: zict>=0.1.3 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (2.0.0)
Collecting cloudpickle>=0.2.1
Downloading cloudpickle-1.6.0-py3-none-any.whl (23 kB)
Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (57.4.0)
Requirement already satisfied: psutil>=5.0 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (5.4.8)
Collecting distributed>=2.0
Downloading distributed-2021.7.2-py3-none-any.whl (769 kB)
[K |████████████████████████████████| 769 kB 44.5 MB/s
[?25h Downloading distributed-2021.7.1-py3-none-any.whl (766 kB)
[K |████████████████████████████████| 766 kB 80.2 MB/s
[?25h Downloading distributed-2021.7.0-py3-none-any.whl (1.0 MB)
[K |████████████████████████████████| 1.0 MB 60.5 MB/s
[?25h Downloading distributed-2021.6.2-py3-none-any.whl (722 kB)
[K |████████████████████████████████| 722 kB 60.1 MB/s
[?25h Downloading distributed-2021.6.1-py3-none-any.whl (722 kB)
[K |████████████████████████████████| 722 kB 65.4 MB/s
[?25h Downloading distributed-2021.6.0-py3-none-any.whl (715 kB)
[K |████████████████████████████████| 715 kB 74.4 MB/s
[?25h Downloading distributed-2021.5.1-py3-none-any.whl (705 kB)
[K |████████████████████████████████| 705 kB 78.5 MB/s
[?25hRequirement already satisfied: sortedcontainers!=2.0.0,!=2.0.1 in /usr/local/lib/python3.7/dist-packages (from distributed>=2.0->dask[complete]) (2.4.0)
Downloading distributed-2021.5.0-py3-none-any.whl (699 kB)
[K |████████████████████████████████| 699 kB 79.7 MB/s
[?25h Downloading distributed-2021.4.1-py3-none-any.whl (696 kB)
[K |████████████████████████████████| 696 kB 78.6 MB/s
[?25h Downloading distributed-2021.4.0-py3-none-any.whl (684 kB)
[K |████████████████████████████████| 684 kB 26.4 MB/s
[?25h Downloading distributed-2021.3.1-py3-none-any.whl (679 kB)
[K |████████████████████████████████| 679 kB 65.7 MB/s
[?25h Downloading distributed-2021.3.0-py3-none-any.whl (675 kB)
[K |████████████████████████████████| 675 kB 63.8 MB/s
[?25h Downloading distributed-2021.2.0-py3-none-any.whl (675 kB)
[K |████████████████████████████████| 675 kB 71.1 MB/s
[?25h Downloading distributed-2021.1.1-py3-none-any.whl (672 kB)
[K |████████████████████████████████| 672 kB 67.6 MB/s
[?25h Downloading distributed-2021.1.0-py3-none-any.whl (671 kB)
[K |████████████████████████████████| 671 kB 75.5 MB/s
[?25h Downloading distributed-2020.12.0-py3-none-any.whl (669 kB)
[K |████████████████████████████████| 669 kB 79.6 MB/s
[?25h Downloading distributed-2.30.1-py3-none-any.whl (656 kB)
[K |████████████████████████████████| 656 kB 81.2 MB/s
[?25hRequirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from Jinja2>=2.9->bokeh>=1.0.0->dask[complete]) (2.0.1)
Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=16.8->bokeh>=1.0.0->dask[complete]) (2.4.7)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.23.0->dask[complete]) (2018.9)
Collecting locket
Downloading locket-0.2.1-py2.py3-none-any.whl (4.1 kB)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.1->bokeh>=1.0.0->dask[complete]) (1.15.0)
Requirement already satisfied: heapdict in /usr/local/lib/python3.7/dist-packages (from zict>=0.1.3->distributed>=2.0->dask[complete]) (1.0.1)
Installing collected packages: locket, cloudpickle, partd, fsspec, distributed
Attempting uninstall: cloudpickle
Found existing installation: cloudpickle 1.3.0
Uninstalling cloudpickle-1.3.0:
Successfully uninstalled cloudpickle-1.3.0
Attempting uninstall: distributed
Found existing installation: distributed 1.25.3
Uninstalling distributed-1.25.3:
Successfully uninstalled distributed-1.25.3
Successfully installed cloudpickle-1.6.0 distributed-2.30.1 fsspec-2021.7.0 locket-0.2.1 partd-1.2.0
[K |████████████████████████████████| 188 kB 7.1 MB/s
[K |████████████████████████████████| 56 kB 5.6 MB/s
[?25hCollecting parsivar
Downloading parsivar-0.2.3.tar.gz (36.2 MB)
[K |████████████████████████████████| 36.2 MB 62 kB/s
[?25hCollecting nltk==3.4.5
Downloading nltk-3.4.5.zip (1.5 MB)
[K |████████████████████████████████| 1.5 MB 43.5 MB/s
[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from nltk==3.4.5->parsivar) (1.15.0)
Building wheels for collected packages: parsivar, nltk
Building wheel for parsivar (setup.py) ... [?25l[?25hdone
Created wheel for parsivar: filename=parsivar-0.2.3-py3-none-any.whl size=36492971 sha256=c4300874ac1edf6da2dd3558a0314f22e6ee3193e80dafd1fbaeb46e9bf8c430
Stored in directory: /root/.cache/pip/wheels/ae/67/7a/49cbf08f64d3f76a26eceaf0e481a40e233f05d4356875cbed
Building wheel for nltk (setup.py) ... [?25l[?25hdone
Created wheel for nltk: filename=nltk-3.4.5-py3-none-any.whl size=1449922 sha256=40aca204334d8dbbea69e9af0cae4be1ecfe4ab71943c53aa4f71341ba8c54de
Stored in directory: /root/.cache/pip/wheels/48/8b/7f/473521e0c731c6566d631b281f323842bbda9bd819eb9a3ead
Successfully built parsivar nltk
Installing collected packages: nltk, parsivar
Attempting uninstall: nltk
Found existing installation: nltk 3.2.5
Uninstalling nltk-3.2.5:
Successfully uninstalled nltk-3.2.5
Successfully installed nltk-3.4.5 parsivar-0.2.3
###Markdown
Imports
###Code
import dask.dataframe as dd
import pandas as pd
import os
from fastai.text.all import *
from fastai.tabular.all import *
import pickle
from torch.utils.data import Dataset
from tqdm import tqdm
import torch as T
from fastai.data.core import DataLoaders
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
from helper import utils
###Output
_____no_output_____
###Markdown
declaring paths
###Code
data_folder = os.path.join(pwd,"data")
final_clicked = os.path.join(data_folder,"final","final_clicked.csv")
final_products = os.path.join(data_folder,"final","final_products.csv")
mix_model_path = os.path.join(data_folder,"models","mix_model")
category_classifier_path = os.path.join(data_folder,"models","category_classifier")
cdf = dd.read_csv(final_clicked)
cdf.head()
###Output
_____no_output_____
###Markdown
loading vocab
###Code
# if vocab exists:
vocab = pickle.load(open(os.path.join(category_classifier_path,"vocab"),'rb'))
vocab
###Output
_____no_output_____
###Markdown
NLP transforms pipeline
###Code
class nlp_pipeline:
def __init__(self, vocab,):
self.vocab = vocab
self.tok = SpacyTokenizer(lang='fa')
self.num = Numericalize(vocab=self.vocab)
def encode(self,x):
x = utils._normalize_text(x)
x = tokenize1(x, self.tok)
x = self.num.encodes(x)
return x
def decode(self,x):
x = self.num.decodes(x)
x = " ".join(x)
return x
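# Quick usage sketch of the pipeline above (illustrative only; the Persian query string is a made-up example):
# encode() normalizes the text, tokenizes it with the Spacy 'fa' tokenizer and maps tokens to vocab ids;
# decode() maps ids back to a space-joined string.
demo_pipeline = nlp_pipeline(vocab=vocab[0])
demo_ids = demo_pipeline.encode("گوشی موبایل سامسونگ")
print(demo_ids, demo_pipeline.decode(demo_ids))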
drop_cols =["Unnamed: 0",
"product_showed",
"DAY(datetime)",
"HOUR(datetime)",
"IS_WEEKEND(datetime)",
"MINUTE(datetime)",
"MONTH(datetime)",
"WEEKDAY(datetime)",
"YEAR(datetime)",
"_id"]
cont_cols = ["rank",
"NUM_WORDS(raw_query)",
"page",
"products.sellers_count",
"products.availabilty_ratio",
"products.mean_all_price",
"products.max_all_price",
"products.min_all_price",
"products.std_all_price",
"products.skew_all_price",
"products.mean_available_price",
"products.max_available_price",
"products.min_available_price",
"products.std_available_price",
"products.skew_available_price",
"products.COUNT(clicked_merged)",
"products.PERCENT_TRUE(clicked_merged.is_clicked)",
"products.NUM_WORDS(product_name_normalized)"]
cat_cols = ['products.DAY(first_added_date)',
'products.DAY(last_added_date)',
'products.HOUR(first_added_date)',
'products.HOUR(last_added_date)',
'products.IS_WEEKEND(first_added_date)',
'products.IS_WEEKEND(last_added_date)',
'products.MINUTE(first_added_date)',
'products.MINUTE(last_added_date)',
'products.MONTH(first_added_date)',
'products.MONTH(last_added_date)',
'products.WEEKDAY(first_added_date)',
'products.WEEKDAY(last_added_date)',
'products.YEAR(first_added_date)',
'products.YEAR(last_added_date)']
text_col = ['raw_query',
'products.category_name',
'products.product_name_normalized',]
target = 'is_clicked'
df = cdf.drop(drop_cols,axis=1).compute()
df
class MixDataSet(Dataset):
def __init__(self, df, cat_col_names, cont_col_names, query_col_name, category_col_name, product_col_name, vocab,target_col_name=None, test=False,normalize=True,):
self.df = df.reset_index(drop=True).copy()
self.cat_col_names = cat_col_names
self.cont_col_names = cont_col_names
self.query_col_name = query_col_name
self.category_col_name = category_col_name
self.product_col_name = product_col_name
self.target_col_name = target_col_name
self.test = test
self.normalize = normalize
self.vocab = vocab
self.nlp_pipeline = nlp_pipeline(vocab=self.vocab)
if self.test == False:
self.target_col_name = target_col_name
def __len__(self):
return self.df.shape[0]
def __getitem__(self, i):
ndf = self.df.iloc[i]
ndf = ndf.fillna(0.0,inplace=False)
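# Log-scale the heavily skewed features: prices are divided by 1000 and log10-transformed,
# and word/click counts are log10-transformed, so the continuous inputs are on a comparable scale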
if self.normalize == True:
if type(ndf) == pd.core.series.Series:
for col in ndf.index:
if "price" in col:
ndf[col] = np.log10(ndf[col]/1000 + 1)
elif "NUM" in col or "COUNT" in col:
ndf[col] = np.log10(ndf[col] + 1)
elif type(ndf) == pd.core.frame.DataFrame:
for col in ndf.columns:
if "price" in col:
ndf[col] = np.log10(ndf[col]/1000 + 1)
elif "NUM" in col or "COUNT" in col:
ndf[col] = np.log10(ndf[col] + 1)
for col in self.cat_col_names:
if "YEAR" in col:
ndf[col] = ndf[col] - 2017
cat = ndf[self.cat_col_names].values
cont = ndf[self.cont_col_names].values
query = self.nlp_pipeline.encode(ndf[self.query_col_name])
category = self.nlp_pipeline.encode(ndf[self.category_col_name])
product = self.nlp_pipeline.encode(ndf[self.product_col_name])
if self.test == False:
target = ndf[self.target_col_name]
return (T.tensor(cat.astype(np.int32)),T.tensor(cont.astype(np.float32)),query,category,product), T.tensor(target)
else:
return (T.tensor(cat.astype(np.int32)),T.tensor(cont.astype(np.float32)),query,category,product)
stratified_df = pd.concat([df[df["is_clicked"] == True].reset_index(drop=True)[0:10000],
df[df["is_clicked"] == False].reset_index(drop=True)[0:10000]]).reset_index(drop=True).sample(frac=1)
stratified_df.groupby(by="is_clicked").count()
b = MixDataSet(stratified_df,cat_cols,cont_cols,"raw_query","products.category_name","products.product_name_normalized",target_col_name="is_clicked",vocab=vocab[0])
b[0:10]
len(b)
stratified_df["is_valid"] = False
stratified_df["is_valid"] = stratified_df["is_valid"].apply(lambda x: True if random.random() < 0.08 else False)
def my_collate(batch):
b = list(zip(*batch))
x,y = b
x1,x2,x3,x4,x5 = list(zip(*x))
return (T.stack(x1),T.stack(x2), nn.utils.rnn.pad_sequence(x3).T, nn.utils.rnn.pad_sequence(x4).T, nn.utils.rnn.pad_sequence(x5).T), T.stack(y).to(T.long)
train_ds = MixDataSet(stratified_df[stratified_df["is_valid"]==False],
cat_cols,
cont_cols,
"raw_query",
"products.category_name",
"products.product_name_normalized",
target_col_name="is_clicked",
vocab=vocab[0])
valid_ds = MixDataSet(stratified_df[stratified_df["is_valid"]==True],
cat_cols,
cont_cols,
"raw_query",
"products.category_name",
"products.product_name_normalized",
target_col_name="is_clicked",
vocab = vocab[0])
len(train_ds), len(valid_ds)
train_dl = DataLoader(dataset=train_ds, batch_size=150, shuffle=True, collate_fn=my_collate)
valid_dl = DataLoader(dataset=valid_ds, batch_size=150, shuffle=True, collate_fn=my_collate)
next(iter(train_dl))
next(iter(valid_dl))
class MixModel(nn.Module):
def __init__(self, embed_sz, n_cont, layers, awd_config, vocab_sz, lin_ftrs,joint_layers):
super(MixModel, self).__init__()
self.embed_sz = embed_sz
self.n_cont = n_cont
self.layers = layers
self.awd_config = awd_config
self.joint_layers = joint_layers
self.tab_model = TabularModel(emb_szs=self.embed_sz, n_cont=n_cont, out_sz=2,layers=layers,)
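# Drop the tabular model's final classification layer so this branch outputs layers[-1]-dimensional features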
self.tab_model.layers = self.tab_model.layers[:-1]
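# Three separate AWD-LSTM classifiers encode the query, category name and product name texts;
# their final classification layers are dropped so each branch outputs lin_ftrs[-1]-dimensional features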
self.awd1 = get_text_classifier(AWD_LSTM,vocab_sz=vocab_sz, n_class=2, config=awd_config,lin_ftrs=lin_ftrs)
self.awd1[-1].layers = self.awd1[-1].layers[:-1]
self.awd2 = get_text_classifier(AWD_LSTM,vocab_sz=vocab_sz, n_class=2, config=awd_config,lin_ftrs=lin_ftrs)
self.awd2[-1].layers = self.awd2[-1].layers[:-1]
self.awd3 = get_text_classifier(AWD_LSTM,vocab_sz=vocab_sz, n_class=2, config=awd_config,lin_ftrs=lin_ftrs)
self.awd3[-1].layers = self.awd3[-1].layers[:-1]
self.joint_layers = [lin_ftrs[-1]*3 + layers[-1]] + joint_layers + [2]
linBins = []
for i in range(0,len(self.joint_layers)-1):
linBins.append(LinBnDrop(self.joint_layers[i],self.joint_layers[i+1]))
self.LinBins = nn.Sequential(*linBins)
self.Softmax = nn.Softmax(dim=1)
def forward(self, x):
xtab = self.tab_model(x[0],x[1])
xnlp1 = self.awd1(x[2])[0]
xnlp2 = self.awd2(x[3])[0]
xnlp3 = self.awd3(x[4])[0]
X = T.cat([xtab, xnlp1, xnlp2, xnlp3],dim=1)
X = self.LinBins(X)
return self.Softmax(X)
def reset(self):
self.awd1.reset()
self.awd2.reset()
self.awd3.reset()
for col in cat_cols:
print(col,"-->", df[col].nunique())
len(cont_cols)
emb_sz = [
(32, 20),
(32, 20),
(24, 20),
(24, 20),
(3, 10),
(3, 10),
(61, 30),
(61, 30),
(13, 10),
(13, 10),
(8, 10),
(8, 10),
(6, 20),
(6, 20)
]
awd_conf = {'bidir': True,
'emb_sz': 1000,
'embed_p': 0.05,
'hidden_p': 0.3,
'input_p': 0.4,
'n_hid': 1000,
'n_layers': 3,
'output_p': 0.4,
'pad_token': 1,
'weight_p': 0.5}
mix_model = MixModel(embed_sz = emb_sz, n_cont= len(cont_cols), layers=[200], awd_config=awd_conf, vocab_sz=len(vocab[0]), lin_ftrs=[500],joint_layers=[500])
state_dict = T.load(open(os.path.join(mix_model_path,"models","mix_model.pth"),'rb'))
mix_model.load_state_dict(state_dict)
mix_model
sum(p.numel() for p in mix_model.parameters())
a = next(iter(train_dl))
tab = mix_model.tab_model(a[0][0],a[0][1])
nlp1 = mix_model.awd1(a[0][2])
nlp2 = mix_model.awd2(a[0][3])
nlp3 = mix_model.awd3(a[0][4])
print(tab.shape, nlp1[0].shape, nlp2[0].shape, nlp3[0].shape)
mix_model(a[0])
fast_mix_dl = DataLoaders(train_dl,valid_dl)
loss_func = nn.CrossEntropyLoss()
learn = Learner(fast_mix_dl,
mix_model,
loss_func = loss_func,
path = mix_model_path,
metrics=[accuracy,error_rate,Recall(),Precision(),F1Score()]).to_fp16()
grp = ShowGraphCallback
svm = SaveModelCallback(at_end=False,every_epoch=False,reset_on_fit=False,monitor='f1_score',fname="mix_model",)
esc = EarlyStoppingCallback(patience=3)
rlr = ReduceLROnPlateau(monitor="valid_loss",patience=2,factor=10,)
learn.add_cbs([grp,svm,esc,rlr,ModelResetter])
learn.cbs
learn.lr_find()
learn.fine_tune(10,1e-04,10)
stratified_df = pd.concat([df[df["is_clicked"] == True].reset_index(drop=True)[0:30000],
df[df["is_clicked"] == False].reset_index(drop=True)[0:30000]]).reset_index(drop=True).sample(frac=1)
stratified_df["is_valid"] = False
stratified_df["is_valid"] = stratified_df["is_valid"].apply(lambda x: True if random.random() < 0.08 else False)
train_ds1 = MixDataSet(stratified_df[stratified_df["is_valid"]==False],
cat_cols,
cont_cols,
"raw_query",
"products.category_name",
"products.product_name_normalized",
target_col_name="is_clicked",
vocab=vocab[0])
valid_ds1 = MixDataSet(stratified_df[stratified_df["is_valid"]==True],
cat_cols,
cont_cols,
"raw_query",
"products.category_name",
"products.product_name_normalized",
target_col_name="is_clicked",
vocab = vocab[0])
train_dl1 = DataLoader(dataset=train_ds1, batch_size=200, shuffle=True, collate_fn=my_collate)
valid_dl1 = DataLoader(dataset=valid_ds1, batch_size=200, shuffle=True, collate_fn=my_collate)
fast_mix_dl1 = DataLoaders(train_dl1,valid_dl1)
loss_func = nn.CrossEntropyLoss()
learn = Learner(fast_mix_dl1,
mix_model,
loss_func = loss_func,
path = mix_model_path,
metrics=[accuracy,error_rate,Recall(),Precision(),F1Score()]).to_fp16()
grp = ShowGraphCallback
svm = SaveModelCallback(at_end=False,every_epoch=False,reset_on_fit=False,monitor='f1_score',fname="mix_model",)
esc = EarlyStoppingCallback(patience=3)
rlr = ReduceLROnPlateau(monitor="valid_loss",patience=2,factor=10,)
learn.add_cbs([grp,svm,esc,rlr,ModelResetter])
learn.cbs
learn.fine_tune(10,1e-04,10)
###Output
_____no_output_____ |
development/negatives-filtering-2.ipynb | ###Markdown
Data loading and processing
###Code
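# Imports assumed by the cells below (not shown in this notebook); `preprocess` is
# presumed to come from a local helper module of this project and is not defined here.
import glob
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split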
files = glob.glob('data/original/*/*')
print(len(files))
files[:10]
negatives_files = [f for f in files if 'NEG' in f]
positives_files = [f for f in files if 'NEG' not in f]
len(negatives_files), len(positives_files)
%%time
neg_imgs = np.array([preprocess(cv.imread(f), normalize=False, equalize=False) for f in negatives_files])
pos_imgs = np.array([preprocess(cv.imread(f), normalize=False, equalize=False) for f in positives_files])
neg_imgs.shape, pos_imgs.shape
pos_means = np.mean(pos_imgs.reshape(pos_imgs.shape[0], -1), axis=1)
neg_means = np.mean(neg_imgs.reshape(neg_imgs.shape[0], -1), axis=1)
pos_stds = np.std(pos_imgs.reshape(pos_imgs.shape[0], -1), axis=1)
neg_stds = np.std(neg_imgs.reshape(neg_imgs.shape[0], -1), axis=1)
means = np.concatenate([neg_means, pos_means])
stds = np.concatenate([neg_stds, pos_stds])
x = np.column_stack([means, stds])
y = np.array([0] * len(neg_imgs) + [1] * len(pos_imgs))
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
###Output
_____no_output_____
###Markdown
Data visualization
###Code
f, a = plt.subplots(1, 2)
f.set_size_inches(15, 6)
for ax in a:
ax.scatter(neg_means, neg_stds, c='blue', s=3, label='negative samples')
ax.scatter(pos_means, pos_stds, c='red', s=3, label='positive samples')
    ax.set_xlabel('mean pixel intensity')
    ax.set_ylabel('pixel intensity std')
ax.legend()
a[1].set_xlim(-1, 15)
a[1].set_ylim(0, 10)
plt.show()
###Output
_____no_output_____
###Markdown
Data saving
###Code
np.savez('neg-classifier-data.npz', x_train=x_train, y_train=y_train)
###Output
_____no_output_____
###Markdown
Data loading
###Code
data = np.load('neg-classifier-data.npz')
x_train = data['x_train']
y_train = data['y_train']
###Output
_____no_output_____
###Markdown
Classifier
###Code
classifier = SVC(C=0.9, kernel='poly', degree=2, gamma=0.35)
classifier.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
Decision boundary
###Code
h = 0.05
cmap_light = ListedColormap(['#AAAAFF', '#FFAAAA'])
cmap_bold = ListedColormap(['#0000FF', '#FF0000'])
x_min, x_max = -1, 15
y_min, y_max = 0, 10
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
plt.scatter(x[:, 0], x[:, 1], c=y, cmap=cmap_bold, s=4)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title('classifier decision boundary')
plt.xlabel('mean')
plt.ylabel('std')
plt.show()
###Output
_____no_output_____
###Markdown
Results
###Code
train_preds = classifier.predict(x_train)
test_preds = classifier.predict(x_test)
train_acc = np.mean(train_preds == y_train)
test_acc = np.mean(test_preds == y_test)
print(f'train accuracy: {train_acc}')
print(f'test accuracy: {test_acc}')
###Output
train accuracy: 1.0
test accuracy: 0.9972144846796658
|
P0/P0_wip.ipynb | ###Markdown
TASK 1: How many different telephone numbers are there in the records? Print a message: "There are <count> different telephone numbers in the records."
###Code
#input: texts list:the first two items in the lists are phone numbers.
#calls list: the first two items in the lists are phone numbers.
#output: unique count in the records
#method: combine all the phone numbers into a list, then make a set of them.
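# The `texts` and `calls` records used below are assumed to be loaded from the
# project's texts.csv and calls.csv files; a minimal loader would look like this:
import csv
with open('texts.csv', 'r') as f:
    texts = list(csv.reader(f))
with open('calls.csv', 'r') as f:
    calls = list(csv.reader(f))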
phone_book = []
for i in range(len(texts)):
phone_book.append(texts[i][0])
phone_book.append(texts[i][1])
for i in range(len(calls)):
phone_book.append(calls[i][0])
phone_book.append(calls[i][1])
ct_phone_number = len(set(phone_book))
print("There are {} different telephone numbers in the records.".format(ct_phone_number))
###Output
There are 570 different telephone numbers in the records.
###Markdown
TASK 2: Which telephone number spent the longest time on the phone during the period? Don't forget that time spent answering a call is also time spent on the phone. Print a message: "<telephone number> spent the longest time, <total time> seconds, on the phone during September 2016." Input: a call list containing lists of records. The first two elements in each record are phone numbers. The last item, with index 3, is the duration. Output: the phone number with the longest time and the duration. Method: 1. If I were in SQL, SELECT the incoming phone number and the duration, UNION ALL the answering phone number and the duration, use a WHERE clause to only include calls in September 2016 (from the timestamp column), then GROUP BY number, ORDER BY the summed duration in descending order, and pick the first record. 2. How to do this in pandas? (A sketch follows the pure-python solution below.) 3. If not importing pandas - only using python - what would I do? 1. Final goal: build a dictionary of phone number: total duration. 2. Steps: 1. check the timestamp to make sure it's in September; 2. get the incoming number - if it's not in the dictionary, add incoming: duration, otherwise add the duration to the running total (and do the same for the answering number).
###Code
from datetime import datetime
phone_dict = {}
phone_max = None
duration_max = 0
start_time = datetime.strptime("01-09-2016 00:00:00","%d-%m-%Y %H:%M:%S")
end_time = datetime.strptime("01-10-2016 00:00:00","%d-%m-%Y %H:%M:%S")
for i in range(len(calls)):
dt = datetime.strptime(calls[i][2],"%d-%m-%Y %H:%M:%S")
if ((dt >= start_time) and (dt < end_time)):
if calls[i][0] not in phone_dict:
phone_dict[calls[i][0]] = int(calls[i][3])
else:
phone_dict[calls[i][0]] += int(calls[i][3])
if duration_max < phone_dict[calls[i][0]]:
duration_max = phone_dict[calls[i][0]]
phone_max = calls[i][0]
if calls[i][1] not in phone_dict:
phone_dict[calls[i][1]] = int(calls[i][3])
else:
phone_dict[calls[i][1]] += int(calls[i][3])
if duration_max < phone_dict[calls[i][1]]:
duration_max = phone_dict[calls[i][1]]
phone_max = calls[i][1]
print("{} spent the longest time, {} seconds, on the phone during September 2016.".format(phone_max,duration_max))
calls[1][3]
from datetime import datetime
new_time = datetime.strptime(calls[1][2],"%d-%m-%Y %H:%M:%S")
start_time = datetime.strptime("01-09-2016 00:00:00","%d-%m-%Y %H:%M:%S")
end_time = datetime.strptime("01-10-2016 00:00:00","%d-%m-%Y %H:%M:%S")
print(start_time < new_time)
###Output
True
###Markdown
How should I test this? What is the format of the timestamp? How do I find out? Task 4: The telephone company wants to identify numbers that might be doing telephone marketing. Create a set of possible telemarketers: these are numbers that make outgoing calls but never send texts, receive texts, or receive incoming calls. 1. Create a set of texting numbers and call-answering numbers. 2. Go through the set of callers. Print a message: "These numbers could be telemarketers: <list of numbers>" The list of numbers should be printed out one per line in lexicographic order with no duplicates - they need to be sorted.
###Code
people_set = set()
call_set = set()
telem_list = []
for i in range(len(texts)):
people_set.add(texts[i][0])
people_set.add(texts[i][1])
for i in range(len(calls)):
people_set.add(calls[i][1])
call_set.add(calls[i][0])
for element in call_set:
if element not in people_set:
telem_list.append(element)
telem_list.sort()
print("These numbers could be telemarketers:")
print(*telem_list, sep = "\n")
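# An equivalent one-liner using set difference (a sketch; it yields the same sorted
# list as telem_list above): keep numbers that only ever appear as call initiators.
telemarketers = sorted(set(c[0] for c in calls) - people_set)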
###Output
These numbers could be telemarketers:
(022)37572285
(022)65548497
(022)68535788
(022)69042431
(040)30429041
(044)22020822
(0471)2171438
(0471)6579079
(080)20383942
(080)25820765
(080)31606520
(080)40362016
(080)60463379
(080)60998034
(080)62963633
(080)64015211
(080)69887826
(0821)3257740
1400481538
1401747654
1402316533
1403072432
1403579926
1404073047
1404368883
1404787681
1407539117
1408371942
1408409918
1408672243
1409421631
1409668775
1409994233
74064 66270
78291 94593
87144 55014
90351 90193
92414 69419
94495 03761
97404 30456
97407 84573
97442 45192
99617 25274
###Markdown
TASK 3: (080) is the area code for fixed line telephones in Bangalore. Fixed line numbers include parentheses, so Bangalore numbers have the form (080)xxxxxxx. Part A: Find all of the area codes and mobile prefixes called by people in Bangalore. - Fixed lines start with an area code enclosed in brackets. The area codes vary in length but always begin with 0. - Mobile numbers have no parentheses, but have a space in the middle of the number to help readability. The prefix of a mobile number is its first four digits, and they always start with 7, 8 or 9. - Telemarketers' numbers have no parentheses or space, but they start with the area code 140. Print the answer as part of a message: "The numbers called by people in Bangalore have codes: <list of codes>" The list of codes should be printed out one per line in lexicographic order with no duplicates. Part B: What percentage of calls from fixed lines in Bangalore are made to fixed lines also in Bangalore? In other words, of all the calls made from a number starting with "(080)", what percentage of these calls were made to a number also starting with "(080)"? Print the answer as part of a message: "<percentage> percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore." The percentage should have 2 decimal digits.
###Code
# PartB:
b_call_ct = 0 # Initiate # of calls made by 080
b_answer_ct = 0 # Initiate # of calls answered by 080 and made by 080
for i in range(len(calls)):
if calls[i][0].startswith("(080)"):
b_call_ct += 1
if calls[i][1].startswith("(080)"):
b_answer_ct += 1
b_answer_percent = round(b_answer_ct/b_call_ct*100,2)
print("{} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.".format(b_answer_percent))
# PartA
b_answer_area = set() # Initiate a set of area codes (080) called
for i in range(len(calls)):
if not calls[i][0].startswith("(080)"):
continue
if calls[i][1].startswith("("):
# fixed lines
idx = (calls[i][1].find(")"))+1
b_answer_area.add(calls[i][1][:idx])
elif calls[i][1].startswith("140"):
# telemarketer
b_answer_area.add("140")
else:
#mobile
b_answer_area.add(calls[i][1][:4])
b_answer_area_list = list(b_answer_area)
b_answer_area_list.sort()
print("The numbers called by people in Bangalore have codes:")
print(*b_answer_area_list, sep = "\n")
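# A small helper (sketch) capturing the three number formats described in the task;
# it mirrors the branching used in Part A above.
def get_code(number):
    if number.startswith("("):  # fixed line: the code is everything up to ")"
        return number[:number.find(")") + 1]
    elif number.startswith("140"):  # telemarketer
        return "140"
    else:  # mobile: the prefix is the first four digits
        return number[:4]
# e.g. sorted({get_code(c[1]) for c in calls if c[0].startswith("(080)")}) reproduces b_answer_area_list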
l1 = [2, 40, 13, 24, 1, 2, 5, 6, 3, 50, 42]
def construct_set1(l):
new_set = set()
for idx, element in enumerate(l):
if element in l[0:idx]:
continue
new_set.add(element)
return new_set
construct_set1(l1)
def construct_set2(l):
new_set = set()
l.sort() #O(N*logN)
for idx, element in enumerate(l):
        if idx > 0 and element == l[idx-1]: # 1
#if binary_search(l[0:idx], element): #log N
continue
new_set.add(element)
return new_set
construct_set2(l1)
def construct_set3(l):
new_set = set()
for idx, element in enumerate(l):
if element in new_set: #1
continue
new_set.add(element) #1
return new_set
construct_set3(l1)
def construct_set4(l):
new_set = set()
for idx, element in enumerate(l):
new_set.add(element) #1
return new_set
construct_set4(l1)
a=[1,2,3]
a[::-1]
def add_one(arr):
"""
:param: arr - list of digits representing some number x
return a list with digits represengint (x + 1)
"""
reversed_arr = arr[::-1]
for i in range(len(reversed_arr)):
if reversed_arr[i] < 9:
reversed_arr[i] += 1
break
else:
reversed_arr[i] = 0
    if reversed_arr[len(reversed_arr)-1] == 0:
reversed_arr.append(1)
return reversed_arr[::-1]
'''
    Why is this wrong? It didn't change the list because `element` is just a local
    name bound to each value; reassigning it never writes back into the list.
reversed_arr = arr[::-1]
for element in reversed_arr:
if element < 9:
element += 1
break
else:
element = 0
return reversed_arr[::-1]
'''
add_one([1,2,3])
arr = [9,9,9]
reversed_arr = arr[::-1]
reversed_arr
for i in range(len(reversed_arr)):
if reversed_arr[i] < 9:
reversed_arr[i] += 1
print(reversed_arr)
for i in range(len(reversed_arr)-1, -1, -1):
if reversed_arr[i] < 9:
reversed_arr[i] += 1
print(reversed_arr)
arr = [1,2,3]
for i in range(len(arr)-1,0,-1):
if arr[i] < 9:
arr[i] += 1
break
else:
arr[i] = 0
if arr[0] == 0:
arr.insert(0,1)
print(arr)
for i in range(len(arr)-1,-1,-1):
print(i)
def add_one1(arr):
"""
:param: arr - list of digits representing some number x
return a list with digits represengint (x + 1)
"""
for i in range(len(arr)-1,-1,-1):
if arr[i] < 9:
arr[i] += 1
break
else:
arr[i] = 0
if arr[0] == 0:
arr.insert(0,1)
return arr
def duplicate_number(arr):
"""
:param - array containing numbers in the range [0, len(arr) - 2]
return - the number that is duplicate in the arr
"""
arr.sort()
    for i in range(len(arr) - 1):
if arr[i] == arr[i+1]:
break
return arr[i]
# is arr.sort() in place? Or does it create additional memory?
###Output
_____no_output_____ |
Week15/WhirlwindTourOfPython-6f1daf714fe52a8dde6a288674ba46a7feed8816/03-Semantics-Variables.ipynb | ###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Basic Python Semantics: Variables and Objects This section will begin to cover the basic semantics of the Python language.As opposed to the *syntax* covered in the previous section, the *semantics* of a language involve the meaning of the statements.As with our discussion of syntax, here we'll preview a few of the essential semantic constructions in Python to give you a better frame of reference for understanding the code in the following sections.This section will cover the semantics of *variables* and *objects*, which are the main ways you store, reference, and operate on data within a Python script. Python Variables Are PointersAssigning variables in Python is as easy as putting a variable name to the left of the equals (``=``) sign:```python assign 4 to the variable xx = 4```This may seem straightforward, but if you have the wrong mental model of what this operation does, the way Python works may seem confusing.We'll briefly dig into that here.In many programming languages, variables are best thought of as containers or buckets into which you put data.So in C, for example, when you write```C// C codeint x = 4;```you are essentially defining a "memory bucket" named ``x``, and putting the value ``4`` into it.In Python, by contrast, variables are best thought of not as containers but as pointers.So in Python, when you write```pythonx = 4```you are essentially defining a *pointer* named ``x`` that points to some other bucket containing the value ``4``.Note one consequence of this: because Python variables just point to various objects, there is no need to "declare" the variable, or even require the variable to always point to information of the same type!This is the sense in which people say Python is *dynamically-typed*: variable names can point to objects of any type.So in Python, you can do things like this:
###Code
x = 1 # x is an integer
x = 'hello' # now x is a string
x = [1, 2, 3] # now x is a list
###Output
_____no_output_____
###Markdown
While users of statically-typed languages might miss the type-safety that comes with declarations like those found in C,```Cint x = 4;```this dynamic typing is one of the pieces that makes Python so quick to write and easy to read.There is a consequence of this "variable as pointer" approach that you need to be aware of.If we have two variable names pointing to the same *mutable* object, then changing one will change the other as well!For example, let's create and modify a list:
###Code
x = [1, 2, 3]
y = x
###Output
_____no_output_____
###Markdown
We've created two variables ``x`` and ``y`` which both point to the same object.Because of this, if we modify the list via one of its names, we'll see that the "other" list will be modified as well:
###Code
print(y)
x.append(4) # append 4 to the list pointed to by x
print(y) # y's list is modified as well!
###Output
[1, 2, 3, 4]
###Markdown
This behavior might seem confusing if you're wrongly thinking of variables as buckets that contain data.But if you're correctly thinking of variables as pointers to objects, then this behavior makes sense.Note also that if we use "``=``" to assign another value to ``x``, this will not affect the value of ``y`` – assignment is simply a change of what object the variable points to:
###Code
x = 'something else'
print(y) # y is unchanged
###Output
[1, 2, 3, 4]
###Markdown
Again, this makes perfect sense if you think of ``x`` and ``y`` as pointers, and the "``=``" operator as an operation that changes what the name points to.You might wonder whether this pointer idea makes arithmetic operations in Python difficult to track, but Python is set up so that this is not an issue. Numbers, strings, and other *simple types* are immutable: you can't change their value – you can only change what values the variables point to.So, for example, it's perfectly safe to do operations like the following:
###Code
x = 10
y = x
x += 5 # add 5 to x's value, and assign it to x
print("x =", x)
print("y =", y)
###Output
x = 15
y = 10
###Markdown
When we call ``x += 5``, we are not modifying the value of the ``10`` object pointed to by ``x``; we are rather changing the variable ``x`` so that it points to a new integer object with value ``15``.For this reason, the value of ``y`` is not affected by the operation. Everything Is an ObjectPython is an object-oriented programming language, and in Python everything is an object.Let's flesh-out what this means. Earlier we saw that variables are simply pointers, and the variable names themselves have no attached type information.This leads some to claim erroneously that Python is a type-free language. But this is not the case!Consider the following:
###Code
x = 4
type(x)
x = 'hello'
type(x)
x = 3.14159
type(x)
###Output
_____no_output_____
###Markdown
Python has types; however, the types are linked not to the variable names but *to the objects themselves*.In object-oriented programming languages like Python, an *object* is an entity that contains data along with associated metadata and/or functionality.In Python everything is an object, which means every entity has some metadata (called *attributes*) and associated functionality (called *methods*).These attributes and methods are accessed via the dot syntax.For example, before we saw that lists have an ``append`` method, which adds an item to the list, and is accessed via the dot ("``.``") syntax:
###Code
L = [1, 2, 3]
L.append(100)
print(L)
###Output
[1, 2, 3, 100]
###Markdown
While it might be expected for compound objects like lists to have attributes and methods, what is sometimes unexpected is that in Python even simple types have attached attributes and methods.For example, numerical types have a ``real`` and ``imag`` attribute that returns the real and imaginary part of the value, if viewed as a complex number:
###Code
x = 4.5
print(x.real, "+", x.imag, 'i')
###Output
4.5 + 0.0 i
###Markdown
Methods are like attributes, except they are functions that you can call using opening and closing parentheses.For example, floating point numbers have a method called ``is_integer`` that checks whether the value is an integer:
###Code
x = 4.5
x.is_integer()
x = 4.0
x.is_integer()
###Output
_____no_output_____
###Markdown
When we say that everything in Python is an object, we really mean that *everything* is an object – even the attributes and methods of objects are themselves objects with their own ``type`` information:
###Code
type(x.is_integer)
###Output
_____no_output_____ |
myapps/jupyter/TimeSeries/Time series prediction.ipynb | ###Markdown
Time Series with Python© Francesco Mosconi, 2016 Regression- detrending- lagged variables- train-test split- validation
###Code
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from pylab import rcParams
rcParams['figure.figsize'] = 11, 7
###Output
_____no_output_____
###Markdown
Regression
###Code
df = pd.read_csv('retail_sales.csv')
df.head()
df = df.set_index(pd.to_datetime(df['Period'])).drop('Period', axis=1)
df.plot(legend=False)
plt.title('Retail Sales US 1992-2016')
###Output
_____no_output_____
###Markdown
Rescale values
###Code
df /= 1e3
df.plot(legend=False)
plt.title('Retail Sales US 1992-2016')
###Output
_____no_output_____
###Markdown
Julian dateUsing Julian dates turns dates to a real number, making it easier to build a regression.We also remove the date of the first day, so that dates will start from zero. Notice that Julian dates are measured in days.
###Code
df['Julian'] = df.index.to_julian_date() - df.index.to_julian_date().min()
df.head()
###Output
_____no_output_____
###Markdown
The most important thing: split Past and Future
###Code
cutoff = pd.Timestamp('2013-01-01')
train = df.loc[:cutoff].copy()
test = df.loc[cutoff:].copy()
train['Value'].plot()
test['Value'].plot()
###Output
_____no_output_____
###Markdown
Simplest trend: linear
###Code
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
X_train = train[['Julian']]
X_test = test[['Julian']]
y_train = train['Value']
y_test = test['Value']
lr.fit(X_train, y_train)
lr.score(X_test, y_test)
train['linear_prediction'] = lr.predict(X_train)
test['linear_prediction'] = lr.predict(X_test)
df['Value'].plot()
train['linear_prediction'].plot()
test['linear_prediction'].plot()
###Output
_____no_output_____
###Markdown
Exercise: Play around with the cutoff date. - How does the regression score change? - What happens if you move the cutoff date to before the 2008 crisis? (A small sketch for this exercise follows the detrending code below.) Predicting the stationary component
###Code
df['linear_prediction'] = lr.predict(df[['Julian']])
df['value_minus_linear'] = df['Value'] - df['linear_prediction']
df['value_minus_linear'].plot()
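# A small sketch for the exercise above: refit the linear trend for a few different
# cutoff dates and compare the out-of-sample R^2 (dates chosen for illustration only).
for cutoff_date in ['2007-01-01', '2010-01-01', '2013-01-01', '2015-01-01']:
    tr, te = df.loc[:cutoff_date], df.loc[cutoff_date:]
    m = LinearRegression().fit(tr[['Julian']], tr['Value'])
    print('{}: {:.3f}'.format(cutoff_date, m.score(te[['Julian']], te['Value'])))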
###Output
_____no_output_____
###Markdown
Delayed variables
###Code
def add_shifts(tdf):
df = tdf.copy()
    for i in range(1,24):
df['shift_'+str(i)] = df['value_minus_linear'].shift(i).fillna(0)
return df
add_shifts(df).head()
cutoff = pd.Timestamp('2013-01-01')
train = df.loc[:cutoff].copy()
test = df.loc[cutoff:].copy()
train['value_minus_linear'].plot()
test['value_minus_linear'].plot()
train = add_shifts(train)
test = add_shifts(test)
model = LinearRegression()
features = ['Julian'] + list(train.loc[:, 'shift_1':].columns)
print(features)
X_train = train[features]
X_test = test[features]
y_train = train['value_minus_linear']
y_test = test['value_minus_linear']
model.fit(X_train, y_train)
plt.plot(y_train.values)
plt.plot(model.predict(X_train))
plt.plot(y_test.values)
plt.plot(model.predict(X_test))
###Output
_____no_output_____
###Markdown
first year predictions are bad, why?
###Code
coefs = model.coef_[np.abs(model.coef_) > 0.]
cols = X_train.columns[np.abs(model.coef_) > 0.]
plt.figure(figsize=(10,7))
s = pd.Series(coefs, index=cols).sort_values()
s.plot(kind='bar', fontsize=18)
plt.title('Non-null feature coefficients', fontsize=20)
model.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Exercise: Scikit-Learn offers many other regression models. Try experimenting with any of the following models and see if you can improve your test score (a quick comparison sketch follows the imports below).
###Code
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ARDRegression, BayesianRidge, ElasticNet, Hinge, Huber
from sklearn.linear_model import Lars, Lasso, LassoLars, ModifiedHuber
from sklearn.linear_model import MultiTaskElasticNet, MultiTaskLasso, OrthogonalMatchingPursuit
from sklearn.linear_model import PassiveAggressiveRegressor, RANSACRegressor, RandomizedLasso
from sklearn.linear_model import Ridge, SGDRegressor, TheilSenRegressor
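# A quick comparison sketch for the exercise above: same lagged features, a few of the
# models imported here with default hyper-parameters (purely illustrative).
for name, reg in [('Ridge', Ridge()), ('Lasso', Lasso()), ('ElasticNet', ElasticNet()),
                  ('RandomForest', RandomForestRegressor(n_estimators=100))]:
    reg.fit(X_train, y_train)
    print('{}: {:.3f}'.format(name, reg.score(X_test, y_test)))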
###Output
_____no_output_____ |
notebooks/002-shifting-moons-exp.ipynb | ###Markdown
Shifting moons with dynamic rotation
###Code
%load_ext autoreload
%autoreload 2
import jax
import optax
import dojax
import jax.numpy as jnp
import flax.linen as nn
import pandas as pd
import matplotlib.pyplot as plt
from celluloid import Camera
from sklearn.datasets import make_moons
from sklearn.decomposition import PCA, KernelPCA
from sklearn.manifold import TSNE
%config InlineBackend.figure_format = "retina"
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
X, y = make_moons(n_samples=100, noise=0.12, random_state=314)
class MLP(nn.Module):
@nn.compact
def __call__(self, x):
x = nn.relu(nn.Dense(30)(x))
x = nn.relu(nn.Dense(30, name="last_layer")(x))
x = nn.Dense(1)(x)
x = nn.sigmoid(x)
return x
model = MLP()
loss = dojax.make_mse_func(model, X, y)
batch = jnp.ones((1, 2))
key = jax.random.PRNGKey(314)
params = model.init(key, batch)
alpha = 0.1
tx = optax.adam(learning_rate=alpha)
opt_state = tx.init(params)
loss_grad_fn = jax.value_and_grad(loss)
for i in range(201):
loss_val, grads = loss_grad_fn(params)
updates, opt_state = tx.update(grads, opt_state)
params = optax.apply_updates(params, updates)
if i % 50 == 0:
print('Loss step {}: '.format(i), loss_val)
###Output
Loss step 0: 0.09936693
Loss step 50: 0.0002880705
Loss step 100: 5.81348e-06
Loss step 150: 3.87713e-06
Loss step 200: 3.0246852e-06
###Markdown
Multiple
###Code
def estimate_weights(model, X, y, key, optimizer, n_epochs=200, seed=None, output_progress=False, print_final_loss=False):
loss = dojax.make_mse_func(model, X, y)
batch = jnp.ones((1, 2))
params = model.init(key, batch)
opt_state = optimizer.init(params)
loss_grad_fn = jax.value_and_grad(loss)
for i in range(n_epochs):
loss_val, grads = loss_grad_fn(params)
        updates, opt_state = optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
if i % 50 == 0 and output_progress:
print('Loss step {}: '.format(i), loss_val)
if print_final_loss:
print(f"Final loss: {loss_val}")
output = {
"params": params,
"final_loss": loss_val,
"train_accuracy": (model.apply(params, X).round().ravel() == y).mean()
}
return output
model = MLP()
alpha = 0.1
tx = optax.adam(learning_rate=alpha)
X, y = make_moons(n_samples=100, noise=0.12, random_state=314)
X = jnp.einsum("nm,mk->nk", X, dojax.rotation_matrix(0))
res = estimate_weights(model, X, y, key, tx)
params = res["params"]
params_flat, _ = jax.flatten_util.ravel_pytree(params)
pred_map = jax.vmap(model.apply, (None, 1))
pred_map = jax.vmap(pred_map, (None, 2))
X_grid = jnp.mgrid[-2:2.5:0.1, -2:2.5:0.1]
Z = jnp.einsum("ijn->ji", pred_map(params, X_grid))
plt.contourf(*X_grid, Z, cmap="bone")
plt.scatter(*X.T, c=y, cmap="Dark2")
###Output
_____no_output_____
###Markdown
Weight's dynamics
###Code
def train_mlp_model(key, data_generator, model, optimiser, eval_elements,
n_epochs=200, centre=False, **kwargs):
"""
Train an MLP model iterating over eval elements and applying each element to the
data-gerating process.
Parameters
----------
data_generator: function
Data generation function. It returns a tuple of
X, y elements.
model: Flax model
The model to train the X and y elements.
optimiser: Optax element
Optimiser to train falx model
eval_elements: array
The range of values to iterate the model configuration
n_epoch: int
Number of epochs to train the model
centre: Bool
Whether to centre the data
Returns
-------
dictionary
"""
data_hist = []
params_hist = []
train_acc_hist = []
n_elements = len(eval_elements)
for it, val in enumerate(eval_elements):
X, y = data_generator(val)
X_train = X.copy()
if centre:
X_train = X_train - X_train.mean(axis=0, keepdims=True)
res = estimate_weights(model, X_train, y, key, optimiser, n_epochs=n_epochs)
params = res["params"]
loss = res["final_loss"]
train_acc = res["train_accuracy"].item()
data_hist.append([X, y])
params_hist.append(params)
train_acc_hist.append(train_acc)
print(f"@it: {it+1:03}/{n_elements:03} || {loss=:0.4e}", end="\r")
hist = {
"data": data_hist,
"params": params_hist,
"train_accuracy": jnp.array(train_acc_hist)
}
return hist
def flat_and_concat_params(params_hist):
"""
Flat and concat a list of parameters trained using
a Flax model
Parameters
----------
params_hist: list of flax FrozenDicts
List of flax FrozenDicts containing trained model
weights.
Returns
-------
jnp.array: flattened and concatenated weights
"""
flat_params = [jax.flatten_util.ravel_pytree(params)[0] for params in params_hist]
flat_params = jnp.r_[flat_params]
return flat_params
def make_rotating_moons(radians, n_samples=100, **kwargs):
"""
Make two interleaving half circles rotated by 'radians' radians
Parameters
----------
radians: float
Angle of rotation
n_samples: int
Number of samples
**kwargs:
Extra arguments passed to the `make_moons` function
"""
X, y = make_moons(n_samples=n_samples, **kwargs)
X = jnp.einsum("nm,mk->nk", X, dojax.rotation_matrix(radians))
return X, y
def make_rotating_translating_moons(radians, n_samples=100, **kwargs):
"""
Make two interleaving half circles rotated by 'radians' radians
Parameters
----------
radians: float
Angle of rotation
n_samples: int
Number of samples
**kwargs:
Extra arguments passed to the `make_moons` function
"""
X, y = make_moons(n_samples=n_samples, **kwargs)
# 1. rotate
X = jnp.einsum("nm,mk->nk", X, dojax.rotation_matrix(radians))
# 2. translate
X = X + 4 * jnp.c_[jnp.cos(radians), jnp.sin(2 * radians)]
return X, y
###Output
_____no_output_____
###Markdown
Rotate
###Code
alpha = 0.05
n_steps = 200
model = MLP()
key = jax.random.PRNGKey(314)
tx = optax.adam(learning_rate=alpha)
radii = jnp.linspace(0, 2 * 2 * jnp.pi, n_steps)
hist = train_mlp_model(key, lambda rad: make_rotating_moons(rad, noise=0.2), model, tx, radii)
data_hist, params_hist = hist["data"], hist["params"]
import os
import pickle
from datetime import datetime
date_fmt = "%y%m%d%H%m"
date_str = datetime.now().strftime(date_fmt)
file_params_name = f"moons-rotating-params-{date_str}.pkl"
file_params_name = os.path.join("outputs", file_params_name)
file_dataset_name = f"moons-rotating-dataset-{date_str}.pkl"
file_dataset_name = os.path.join("outputs", file_dataset_name)
with open(file_params_name, "wb") as f:
pickle.dump(params_hist, f)
with open(file_dataset_name, "wb") as f:
pickle.dump(data_hist, f)
import pickle
file_name = "./outputs/moons-rotating-params-2203010703.pkl"
with open(file_name, "rb") as f:
params_hist = pickle.load(f)
params_flat_hist = flat_and_concat_params(params_hist)
proj = PCA(n_components=2)
# proj = KernelPCA(n_components=2, kernel="cosine")
# proj = TSNE(n_components=2, init="pca", random_state=314, perplexity=10)
w_transformed = proj.fit_transform(params_flat_hist)
plt.title("Projected weights")
domain = radii % (2 * jnp.pi)
plt.scatter(*w_transformed.T, c=domain, cmap="twilight")
plt.axis("equal");
def plot_history(params_hist, data_hist, ranges):
"""
Animate projected weight dynamics and observations
Parameters
----------
params_hist: list of FrozenDict
List of trained weights
data_hist: list of (array(N).
"""
...
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
camera = Camera(fig)
xmin, xmax = -2, 2
ymin, ymax = -2, 2
X_grid = jnp.mgrid[xmin:xmax:0.1, ymin:ymax:0.1]
ax[0].set_title("Observation space")
ax[1].set_title("(projected) weight space")
for it in range(n_steps):
# plt.cla()
params = params_hist[it]
X_step, y_step = data_hist[it]
Z = jnp.einsum("ijn->ji", pred_map(params, X_grid))
ax[1].scatter(*w_transformed[:it].T, c=radii[:it] % (2 * jnp.pi), cmap="twilight")
ax[1].axis("equal")
ax[0].contourf(*X_grid, Z, cmap="bone")
ax[0].scatter(*X_step.T, c=y_step, cmap="Dark2")
ax[0].set_xlim(xmin, xmax)
ax[0].set_ylim(ymin, ymax)
camera.snap()
animation = camera.animate()
# animation.save('half-moons.gif', writer = 'imagemagick')
animation.save('half-moons.mp4', fps=20, dpi=150)
###Output
_____no_output_____
###Markdown
Approximate projection. In this section, we investigate the effect of projecting the weights to a linear subspace and projecting them back to the full space using an approximate mapping. In this test, we take $d$ principal components, reconstruct the full weight vector with the PCA inverse transform, and compare the test accuracy obtained with the reconstructed weights to the accuracy obtained with the full weights, for each rotation of the dataset.
###Code
def create_comparisson_df(model, projected_weights, full_weights, configurations, projection,
test_samples=100, seed=None):
hist_values = []
n_configurations = len(configurations)
iterables = zip(configurations, projected_weights, full_weights)
for ix, (config, w_proj, w_full) in enumerate(iterables):
seed_ix = None if seed is None else seed + ix
print(f"@it{ix+1:03}/{n_configurations}", end="\r")
w_full_approx = projection.inverse_transform(w_proj)
w_full_approx = rebuild_params(w_full_approx)
X_test, y_test = make_rotating_moons(config, n_samples=test_samples, random_state=seed_ix)
accuracy_full = (model.apply(w_full, X_test).round().ravel() == y_test).mean().item()
accuracy_proj = (model.apply(w_full_approx, X_test).round().ravel() == y_test).mean().item()
entry = {
"radius": config.item(),
"acc_full": accuracy_full,
"acc_proj": accuracy_proj
}
hist_values.append(entry)
hist_values = pd.DataFrame(hist_values).set_index("radius")
return hist_values
n_components = 100
proj = PCA(n_components=n_components)
w_transformed = proj.fit_transform(params_flat_hist)
_, rebuild_params = jax.flatten_util.ravel_pytree(params_hist[0])
components = range(0, 210, 10)
errors = []
for n_components in components:
n_components = 1 if n_components == 0 else n_components
print(f"Evaluating component {n_components}")
proj = PCA(n_components=n_components)
w_transformed = proj.fit_transform(params_flat_hist)
_, rebuild_params = jax.flatten_util.ravel_pytree(params_hist[0])
hist_values = create_comparisson_df(model, w_transformed, params_hist, radii, proj)
error = hist_values.diff(axis=1).dropna(axis=1)
mean_abs_error = error.abs().mean().item()
mean_error = error.mean().item()
errors_entry = {
"n_components": n_components,
"mean_abs_error": mean_abs_error,
"mean_error": mean_error
}
errors.append(errors_entry)
print(f"{mean_abs_error=:0.4f}")
print(f"{mean_error=:0.4f}", end="\n"*2)
errors_df = pd.DataFrame(errors, index=components)
fig, ax = plt.subplots(1, 2, figsize=(12, 4))
ax[0].set_title("Mean error")
ax[1].set_title("Mean absolute error")
errors_df["mean_error"].plot(marker="o", ax=ax[0])
errors_df["mean_abs_error"].plot(marker="o", ax=ax[1])
ax[0].grid(alpha=0.5)
ax[1].grid(alpha=0.5)
plt.tight_layout()
hist_values = []
for ix, radius in enumerate(radii):
print(f"@it{ix+1:03}/{len(radii)}", end="\r")
deg = (180 * (radii[ix] % (2 * jnp.pi))).item()
w_approx = proj.inverse_transform(w_transformed[ix])
w_approx = rebuild_params(w_approx)
w_full = params_hist[ix]
X_test, y_test = make_rotating_moons(radii[ix], n_samples=100)
accuracy_proj = (model.apply(w_approx, X_test).round().ravel() == y_test).mean().item()
accuracy_full = (model.apply(w_full, X_test).round().ravel() == y_test).mean().item()
entry = {
"radius": radius.item(),
"accuracy_full": accuracy_full,
"accuracy_approx": accuracy_proj,
}
hist_values.append(entry)
df_values = pd.DataFrame(hist_values).set_index("radius")
df_values = pd.DataFrame(hist_values).set_index("radius")
df_values.diff(axis=1).iloc[:, 1].plot()
plt.axhline(y=0, c="tab:gray", linestyle="--")
plt.xticks(rotation=45);
fig, ax = plt.subplots()
pd.DataFrame(hist_values).plot(x="radius", y="accuracy_approx", ax=ax)
pd.DataFrame(hist_values).plot(x="radius", y="accuracy_full", ax=ax)
plt.xticks(rotation=45);
###Output
_____no_output_____
###Markdown
Rotate and shift
###Code
for r in jnp.linspace(0, 2 * jnp.pi, 100):
x = 5 * jnp.cos(r)
y = 5 * jnp.sin(2 * r)
plt.scatter(x, y, c="tab:blue")
plt.axis("equal");
alpha = 0.01
n_steps = 200
model = MLP()
key = jax.random.PRNGKey(314)
tx = optax.adam(learning_rate=alpha)
radii = jnp.linspace(0, 2 * 2 * jnp.pi, n_steps)
def rotate_shift_centre():
"""
"""
...
hist = train_mlp_model(key, lambda rad: make_rotating_translating_moons(rad, noise=0.1),
model, tx, radii, centre=True)
data_hist, params_hist = hist["data"], hist["params"]
params_flat_hist = flat_and_concat_params(params_hist)
radii_mod = radii % (2 * jnp.pi)
plt.scatter(radii_mod, hist["train_accuracy"])
proj = PCA(n_components=2)
# proj = KernelPCA(n_components=2, kernel="cosine")
proj = TSNE(n_components=2, init="pca", random_state=314,
perplexity=20, learning_rate="auto")
w_transformed = proj.fit_transform(params_flat_hist)
plt.title("Projected weights")
plt.scatter(*w_transformed.T, c=radii_mod, cmap="twilight")
plt.axis("equal")
plt.axis("off")
###Output
/home/gerardoduran/miniconda3/lib/python3.9/site-packages/sklearn/manifold/_t_sne.py:982: FutureWarning: The PCA initialization in TSNE will change to have the standard deviation of PC1 equal to 1e-4 in 1.2. This will ensure better convergence.
warnings.warn(
|
wk4_scRNA-multimodal/4_scRNA_CITE-Python.ipynb | ###Markdown
Welcome to Week 4, Single Cell RNA (cont.)! This week, we're going to go a bit deeper into scRNA analysis, such as how to interact with Seurat objects, add additional datatypes including CITE-seq and TCR/BCR-seq data, and create custom, publication-ready plots. We'll continue to use Scanpy, which has some nice capabilities for multi-modal data analysis. The two datatypes we will be working with today are **CITE-seq** and **TCR/BCR-seq** data. The main idea of both is that additional information about the cell is captured using the same cell barcode from reverse transcription so that multiple types of data can be assigned to the same cell. CITE-seq is a method for capturing surface protein information using oligo-conjugated antibodies developed at the New York Genome Center. Here antibodies are conjugated to oligos which contain two important sequences: an antibody specific barcode which is used to quantify surface protein levels in individual cells and a capture sequence (either poly-A sequence or a 10X specific capture sequence) which enables the antibody oligo to be tagged with the cell barcode during reverse transcription. You can look at more details in the publication here: * https://www.ncbi.nlm.nih.gov/pubmed/28759029 Oligo-conjugated antibodies compatible with 10X scRNA (both 5' and 3') are commercially available from BioLegend (https://www.biolegend.com/en-us/totalseq) and can also be used to multiplex different samples in the same 10X capture. This works by using an antibody which recognizes a common surface antigen and using the antibody barcode to distinguish between samples, a process known as **cell hashing**: * https://www.ncbi.nlm.nih.gov/pubmed/30567574 We won't be using hashtag data today, but many of the same strategies apply and feel free to reach out if you are interested in learning more! The second data type we will be working with is TCR/BCR sequencing data. T and B cells express a highly diverse repertoire of transcripts resulting from V(D)J recombination - the T cell receptor (TCR) in T cells and immunoglobulin (Ig) or BCR in B cells. Daughter cells will share the same TCR/BCR sequence, allowing this sequence to be used to track clonal cell populations over time and space, as well as infer lineage relationships. TCR/BCR sequences are amplified from the cDNA library in the 5' immune profiling 10X kit, allowing these sequences to be matched to the gene expression library from the same cell. For more details, see the 10X website: * https://www.10xgenomics.com/products/vdj/ For both of these applications, we'll be following this tutorial: * https://scanpy-tutorials.readthedocs.io/en/multiomics/cite-seq/pbmc5k.html Import Statements
###Code
import scanpy as sc
import pandas as pd
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from collections import Counter, defaultdict
from scipy import stats as scistats
import scrublet as scr
import scipy.io
%matplotlib inline
# you'll need to change these for yourself
path = '/Users/kevin/changlab/covid19/3_scRNA/data/filtered_feature_bc_matrix/'
figpath = '/Users/kevin/changlab/covid19/4_scRNA-part-2/figures/'
# lets set the default figure settings
sc.settings.set_figure_params(dpi_save=300)
sc.settings.figdir = figpath
# helpful plotting functions, "sax" or "simple ax" and "prettify ax" or "pax"
def pax(ax):
mpl.rcParams['font.sans-serif'] = 'Helvetica'
for spine in ax.spines.values():
spine.set_color('k')
ax.set_frameon=True
ax.patch.set_facecolor('w')
ax.tick_params(direction='out', color = 'k', length=5, width=.75, pad=8)
ax.set_axisbelow(True)
ax.grid(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
mpl.rcParams['font.sans-serif'] = 'Helvetica'
def sax(figsize=(6,6)):
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
pax(ax)
return fig, ax
def sf(fig, fn, formats=['png'], dpi=300, figpath=figpath):
for f in formats:
fig.savefig(figpath + fn + '.' + f, dpi=dpi, bbox_inches='tight')
###Output
_____no_output_____
###Markdown
First, go back to the week three notebook, re-run everything, and save the output so you can just re-import the processed dataset here. Or, you can use the file that I outputted to have the same input. I've included the code that I ran to generate it below.
###Code
# # process with scrublet
# print('processing with scrublet')
# counts_matrix = scipy.io.mmread(path + '/matrix.mtx.gz').T.tocsc()
# cells = pd.read_csv(path + '/barcodes.tsv.gz', sep='\t', header=None, names=['barcode'])
# cells = cells.set_index('barcode', drop=False)
# scrub = scr.Scrublet(counts_matrix, expected_doublet_rate=0.08)
# doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=2,
# min_cells=3,
# min_gene_variability_pctl=85,
# n_prin_comps=30)
# predicted_doublets = scrub.call_doublets(threshold=0.25)
# cells['doublet_score'] = doublet_scores
# cells['predicted_doublet'] = predicted_doublets
# # import data
# print('importing data')
# gex = sc.read_10x_mtx(path, gex_only=True)
# gex.obs['doublet_score'] = cells.loc[gex.obs.index, 'doublet_score']
# gex.obs['predicted_doublet'] = cells.loc[gex.obs.index, 'predicted_doublet']
# # preliminary processing
# print('preliminary processing')
# sc.pp.filter_cells(gex, min_genes=200)
# sc.pp.filter_genes(gex, min_cells=3)
# mito_genes = gex.var_names.str.startswith('MT-')
# gex.obs['percent_mito'] = np.sum(
# gex[:, mito_genes].X, axis=1).A1 / np.sum(gex.X, axis=1).A1
# gex.obs['n_counts'] = gex.X.sum(axis=1).A1
# gex = gex[gex.obs.n_genes >= 500, :]
# gex = gex[gex.obs.percent_mito < 0.1, :]
# sc.pp.normalize_total(gex, target_sum=1e4)
# sc.pp.log1p(gex)
# gex.raw = gex
# # dimensionality reduction
# print('secondary processing')
# sc.pp.highly_variable_genes(gex, n_top_genes=2000)
# gex = gex[:, gex.var.highly_variable]
# sc.pp.regress_out(gex, ['n_genes'])
# sc.pp.scale(gex, max_value=10)
# sc.tl.pca(gex, svd_solver='arpack', n_comps=50)
# sc.pp.neighbors(gex, n_neighbors=10, n_pcs=50, random_state=1)
# sc.tl.leiden(gex, random_state=1, resolution=.4)
# sc.tl.umap(gex)
# new_cluster_names = ['Mono_CD14', #0
# 'CD4 T', #1
# 'B', #2
# 'CD8 T', #3
# 'NK', #4
# 'CD8 Tem', #5
# 'Mono_FCGR3A', #6
# 'Und1_Doublets', #7
# 'cDC', #8
# 'gd T', #9 gamma delta t cells
# 'pDCs', #10
# 'Platelets', #11
# 'Plasma B', #12
# 'Und2', #13
# ]
# gex.rename_categories('leiden', new_cluster_names)
# # plot things
# print('plotting')
# # plot1
# fig = sc.pl.umap(gex, color=['leiden'],
# legend_fontsize = 8,
# legend_loc = 'on data', return_fig=True)
# fig.savefig(figpath + '0_leiden-clustering-renamed.png', dpi=300, bbox_inches='tight')
# # plot2
# genes_to_plot = ['predicted_doublet','n_genes',
# 'n_counts','percent_mito']
# fig = sc.pl.umap(gex, color=genes_to_plot, use_raw=True,
# sort_order=True, ncols=2, return_fig=True)
# fig.savefig(figpath + '0_umap-metadata.png', dpi=300, bbox_inches='tight')
# # plot3
# genes_to_plot = ['CD3G','CD4','CD8A',
# 'TRDV2','KLRB1','NKG7',
# 'CD14','FCGR3A','FCER1A',
# 'MS4A1','JCHAIN','PPBP',
# ]
# fig = sc.pl.umap(gex, color=genes_to_plot, use_raw=True,
# sort_order=True, ncols=3,return_fig=True, color_map='Reds')
# fig.savefig(figpath + '0_umap-gene-expression.png', dpi=300, bbox_inches='tight')
# # save the results
# gex.write(figpath + 'scrna_wk3_processed.h5ad', compression='gzip')
# import the data
# reminder that you'll need to change the path to this
gex = sc.read_h5ad(figpath + 'scrna_wk3_processed.h5ad')
gex
# make sure that everything looks good
genes_to_plot = ['CD3G','CD4','CD8A',
'TRDV2','KLRB1','NKG7',
'CD14','FCGR3A','FCER1A',
'MS4A1','JCHAIN','PPBP',
]
fig = sc.pl.umap(gex, color=genes_to_plot, use_raw=True,
sort_order=True, ncols=3,return_fig=True, color_map='Reds')
plt.show()
fig = sc.pl.umap(gex, color=['leiden'],
legend_fontsize = 8,
legend_loc = 'on data', return_fig=True)
plt.show()
###Output
_____no_output_____
###Markdown
CITE-seq Analysis
###Code
# first, read in the cite seq information
# remember that gex_only=False will let you read them both in
data = sc.read_10x_mtx(path, gex_only=False)
data
# what cite seq features do we have?
# how many genes?
# how many cite-seq?
# rename the antibody capture genes
# get rid of the "_TotalSeqC" part of the name just to make our lives easier
# e.g. CD3_TotalSeqC to CD3
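# One way to answer the questions above (a sketch): count the feature types, then strip
# the "_TotalSeqC" suffix from the antibody names. Note that a few renamed antibodies
# (e.g. CD4) share a name with a gene, so anndata may warn about non-unique var names.
print(data.var['feature_types'].value_counts())
data.var_names = [name.replace('_TotalSeqC', '') for name in data.var_names]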
# filter this to just include cells that we analyzed previously, so the datasets will align
# you can do this with
data = data[data.obs.index.isin(gex.obs.index), :]
# now lets get just the protein information, and make that its own anndata object
protein = data[:, data.var['feature_types'] == 'Antibody Capture'].copy()
protein
###Output
_____no_output_____
###Markdown
Now let's break out of scanpy for a minute to inspect, normalize, and scale this data on our own. Scanpy seems to be developing some functions specifically for protein data, but hasn't yet implemented them. But this isn't a problem! We can do things on our own, and transform the data into a format that scanpy wants.**We're going to break this down in a few steps:**1. get the raw antibody count data from the protein anndata object. 2. compute the centered log ratio (CLR) of antibody counts (this is different than for RNA!) - more notes on this below. 3. scale the data to be mean centered and have unit variance (i.e., z-normalization). This is the same as for RNA. 4. save the CLR normalized antibody counts as the raw data of the protein object, and the scaled data as the (normal) data of the protein object, which will be used for dimensionality reduction. Now, in terms of what the actual normalizations are: we're going to do this with the .apply() function with dataframes. I'm providing an example for how you would do the depth normalization that you'd normally do for RNA-seq below, but you should play around on your own with implementing the normalizations in 2 and 3.**Normalization methods:*** depth normalization (as a comparison). For a cell, divide the counts for each gene/antibody by the sum of all gene/antibody counts for that cell, then multiply by some scaling factor (e.g. 10,000). Commonly, you would also log transform this, and add a pseudocount (say 1). This is sometimes referred to as **log1p**.* CLR. For an antibody, divide the counts for each antibody by the geometric mean antibody counts across all cells, then take the natural log of this. Similarly, you'll add a pseudocount of 1.* z-normalization (scaling to zero mean and unit variance). Basically, you're making all datapoints have similar distributions. For a gene, return the count for a cell minus the mean count across all cells, divided by the standard deviation across all cells.* clipping extremes. You can use the np.clip() function to do this. Basically, this will take any value lower than the lower bound in np.clip and make it equal to the lower bound, and do the same for the upper bound. You might combine this with computing the mean and standard deviation, to clip values > 3 stds away from the mean; or np.percentile() to clip values that are less or greater than a given percentile in the data. It's worth taking the time to look at why the CLR transformation is better than a simple log transformation. Why? Because antibodies aren't genes - when a gene is negative, the count is 0; when a gene is positive, the count is greater than 0. But does this hold true with antibodies? When an antibody is negative, the count isn't necessarily 0 - the antibody might have background! The CLR transformation does a better job of dealing with this, by looking at the relative abundance of the antibody.
###Code
# get the raw data
protein_orig = pd.DataFrame(protein.X.todense(), index=protein.obs.index, columns=protein.var.index).T
# what does your data look like?
# I'd recommend first plotting the distribution of total antibody counts across all cells
# sf(fig, '1_preprocess_histogram_antibody-counts')
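# A quick look at the raw data (sketch): distribution of total antibody counts per cell
fig, ax = sax()
ax.hist(np.log10(protein_orig.sum(axis=0) + 1), bins=100)
ax.set_xlabel('log10(total antibody counts per cell + 1)')
ax.set_ylabel('number of cells')
plt.show()
sf(fig, '1_preprocess_histogram_antibody-counts')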
# what if we just take a 'naive' approach to normalization?
protein_norm_depth = protein_orig.apply(lambda x: 10000 * x / x.sum(), axis=0)
protein_norm_depth = np.log(protein_norm_depth + 1)
# plot the distribution of counts for all of these
fig = plt.figure(figsize=(10,10))
axes = [fig.add_subplot(5,4,i+1) for i in range(len(protein_norm_depth.index))]
xlim = [protein_norm_depth.min().min(), protein_norm_depth.max().max()]
bins = np.linspace(xlim[0], xlim[1], 100)
for ix, p in enumerate(protein_orig.index):
ax = axes[ix]
pax(ax)
vals = protein_norm_depth.loc[p]
ax.hist(vals, bins=bins)
ax.set_title(p, size=16)
ax.set_xlim(xlim)
fig.tight_layout()
plt.show()
sf(fig, '1_preprocess_log1p-distributions')
# now lets compare this with the CLR approach
def clr(x, pseudo=1):
x = x + pseudo
geo_mean = scistats.gmean(x)
return np.log(x / geo_mean)
protein_norm_clr = protein_orig.apply(clr, axis=1)
protein_norm_clr.head()
# plot the distribution of counts for all of these
# sf(fig, '1_preprocess_clr-distributions')
# now lets compare the two with a scatter plot
# sf(fig, '1_preprocess_scatter-norm-methods')
# now scale this to unit variance
# see https://en.wikipedia.org/wiki/Feature_scaling under z-normalization
# also, clip extremes - clip anything less than -10 and above 10
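# A sketch of the scaling step described above: z-normalize each antibody across cells,
# then clip extreme values to [-10, 10]; this defines protein_scaled, which is used below.
protein_scaled = protein_norm_clr.apply(lambda x: (x - x.mean()) / x.std(), axis=1)
protein_scaled = protein_scaled.clip(-10, 10)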
# plot the distribution of counts for all of the scaled data
# note how the distributions are relatively similar
# sf(fig, '1_preprocess_scaled_clr-distributions')
# what if we want to make a scatter plot of one CD4 vs CD8a?
# compare the depth-normalized vs CLR normalized counts
# make it once with depth-normalized counts and once with CLR normalized
# sf(fig, '1_preprocess_scatter_log1p_cd4-8')
# what if we want to make a scatter plot of one antibody?
# sf(fig, '1_preprocess_scatter_clr_cd4-8')
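# A sketch of the CD4 vs CD8a comparison described above (assumes these are the antibody
# names present in protein_norm_clr.index after stripping the TotalSeq suffix).
fig = plt.figure(figsize=(10, 5))
for i, (label, norm_df) in enumerate([('log1p', protein_norm_depth), ('CLR', protein_norm_clr)]):
    ax = fig.add_subplot(1, 2, i + 1)
    pax(ax)
    ax.scatter(norm_df.loc['CD4'], norm_df.loc['CD8a'], s=2, alpha=0.3)
    ax.set_xlabel('CD4 ({})'.format(label))
    ax.set_ylabel('CD8a ({})'.format(label))
plt.show()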
###Output
_____no_output_____
###Markdown
Now go back to scanpy. Let's save the protein_norm_clr values as the raw data in protein, and the protein_scaled values in the data slot of protein. Let's also exclude the control proteins from the main data slot.
###Code
protein = data[:, data.var['feature_types'] == 'Antibody Capture'].copy()
protein.var['control'] = ['control' in i for i in protein.var.index]
protein.X = protein_norm_clr.T
protein.raw = protein
protein.X = protein_scaled.T
protein = protein[:, ~protein.var['control']]
protein
protein.var
protein_genes = ['CD3D','CD19','PTPRC',
'CD4','CD8A','CD14','FCGR3A',
'NCAM1','IL2RA','PTPRC',
'PDCD1','TIGIT','IL7R','FUT4']
protein.var['rna_name'] = protein_genes
name_dict = dict(zip(protein.var.index, protein.var['rna_name']))
protein.var.head()
sc.pp.pca(protein, n_comps=len(protein.var)-1)
sc.pp.neighbors(protein, n_neighbors=30, n_pcs=len(protein.var)-1)
sc.tl.leiden(protein, key_added="protein_leiden", resolution=.33)
sc.tl.umap(protein)
genes_to_plot = protein.var.index.tolist() + ['protein_leiden']
fig = sc.pl.umap(protein, color=genes_to_plot,
sort_order=True, ncols=4,return_fig=True, color_map='Blues', use_raw=True,
vmin='p5', vmax='p99.9')
fig.set_size_inches(12,12)
sf(fig,'2_umap_with_cite-clustering')
plt.show()
###Output
_____no_output_____
###Markdown
Now let's integrate this with the RNA data. I'm going to do this a little fast and loose because I think that scanpy hasn't yet fully implemented the CITE-seq stuff too well. Basically, we're going to add the umap coordinates and clustering information from the RNA processed data to the protein-processed data, and vice versa.
###Code
# add gex to protein
protein.obsm['RNA_umap'] = gex[protein.obs.index].obsm['X_umap']
protein.obs['rna_leiden'] = gex.obs.loc[protein.obs.index, 'leiden']
# add protein to gex
# I'll leave you to do this - a sketch follows
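# A sketch of the reverse direction, mirroring the two lines above:
gex.obsm['protein_umap'] = protein[gex.obs.index].obsm['X_umap']
gex.obs['protein_leiden'] = protein.obs.loc[gex.obs.index, 'protein_leiden']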
# now, let's plot the cite-seq information on top of the rna clusters
genes_to_plot = protein.var.index.tolist() + ['rna_leiden']
fig = sc.pl.embedding(protein, 'RNA_umap', color=genes_to_plot,
sort_order=True, ncols=4,return_fig=True, color_map='Blues', use_raw=True,
vmin='p5', vmax='p99.9', legend_fontsize=8)
fig.set_size_inches(12,12)
sf(fig,'3_RNA-umap_with_CITE-counts')
plt.show()
# and, let's plot some rna-seq information on top of the cite clusters
# I'll leave you to do this one
# sf(fig,'3_CITE-umap_with_RNA-counts')
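# One possible version (a sketch), mirroring the cell above with the roles swapped:
# RNA expression and the protein-based clustering drawn on the protein-derived UMAP.
genes_to_plot = ['CD3G', 'CD4', 'CD8A', 'MS4A1', 'CD14', 'FCGR3A', 'NKG7', 'FCER1A', 'protein_leiden']
fig = sc.pl.embedding(gex, 'protein_umap', color=genes_to_plot,
                      sort_order=True, ncols=3, return_fig=True, color_map='Reds',
                      use_raw=True, legend_fontsize=8)
fig.set_size_inches(12, 12)
sf(fig, '3_CITE-umap_with_RNA-counts')
plt.show()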
###Output
_____no_output_____
###Markdown
Now, let's plot RNA information against CITE information to see how they compare.
###Code
# first, get the metadata from the scanpy .obs dataframe
meta = gex.obs
meta.head()
# and add in the umap coordinates from the RNA
meta['umap_1'] = gex.obsm['X_umap'][:, 0]
meta['umap_2'] = gex.obsm['X_umap'][:, 1]
# now add in the umap coordinates from the CITE-seq
meta['umap-cite_1'] = protein[meta.index].obsm['X_umap'][:, 0]
meta['umap-cite_2'] = protein[meta.index].obsm['X_umap'][:, 1]
meta.head()
# here's two helper functions to get gene/protein expression information
def get_gene_expression(gene, adata=gex, undo_log=False, cells=''):
gene_ix = adata.raw.var.index.get_loc(gene)
vals = adata.raw.X[:, gene_ix].toarray().ravel()
if undo_log:
vals = np.exp(vals) - 1
vals = pd.Series(vals, index=adata.obs.index)
return vals
def get_protein_expression(gene, data=protein_norm_clr):
    # CLR-normalized counts for one antibody across all cells
    vals = data.loc[gene]
    return vals
# make a scatter plot of RNA expression vs CITE-seq counts
for gene in protein.var.index:
    rna_vals = get_gene_expression(name_dict[gene])
    protein_vals = get_protein_expression(gene)
    fig = plt.figure(figsize=(5, 5))
    # align the CITE counts to the cells kept in the RNA object before plotting
    plt.scatter(rna_vals, protein_vals.loc[rna_vals.index], s=2)
    plt.xlabel(name_dict[gene] + ' (RNA)'); plt.ylabel(gene + ' (CITE, CLR)')
    sf(fig, '4_scatter_rna-cite_' + gene)
# plot the RNA and CITE counts on top of the UMAP from the RNA data
for gene in protein.var.index:
    rna_vals = get_gene_expression(name_dict[gene])
    protein_vals = get_protein_expression(gene)
    fig = plt.figure(figsize=(10,5))
    # plot RNA: the RNA UMAP colored by the gene's expression
    ax = plt.subplot(1, 2, 1)
    ax.scatter(meta['umap_1'], meta['umap_2'], c=rna_vals.loc[meta.index], s=2, cmap='Blues')
    ax.set_title(name_dict[gene] + ' (RNA)')
    # plot PROTEIN: the same coordinates colored by the CLR-normalized antibody counts
    ax = plt.subplot(1, 2, 2)
    ax.scatter(meta['umap_1'], meta['umap_2'], c=protein_vals.loc[meta.index], s=2, cmap='Blues')
    ax.set_title(gene + ' (CITE)')
    sf(fig, '4_umap_rna-cite_' + gene)
###Output
_____no_output_____ |
dev-files/notebook files/poisson archive/poisson problem/poisson_1d.ipynb | ###Markdown
Modelling Poisson using PINN __Author: Manu Jayadharan__ Written as part of the FlowNet package, a TensorFlow-based neural network package to solve fluid flow PDEs. Solving the Poisson equation $-\Delta u = f$ using a physics-informed neural network. 1D Poisson problem. Manufactured solution: we use $u = 3\sin(4x)$ for $x\in [-1,1]$. Importing packages
###Code
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Manufacturing data for training
###Code
np.random.seed(123)
X_tr_pde = np.random.uniform(-1,1,500).reshape(500,1)
###Output
_____no_output_____
###Markdown
Plotting histogram of randomly selected points to make sure they are uniformly distributed
###Code
plt.hist(X_tr_pde)
plt.xlabel("training points")
plt.ylabel("frequency ")
Y_tr_pde = np.zeros((X_tr_pde.shape[0],1))
Y_tr_pde = np.concatenate([Y_tr_pde,np.zeros((Y_tr_pde.shape[0],1))],axis=1)
Y_tr_pde.shape
X_tr_Dr_bc_left = -1*np.ones(200).reshape(200,1)
X_tr_Dr_bc_right = 1*np.ones(200).reshape(200,1)
X_bc = np.concatenate([X_tr_Dr_bc_left,X_tr_Dr_bc_right],axis=0)
Y_tr_bc = 3*np.sin(4*X_bc)
Y_tr_bc = np.concatenate([Y_tr_bc,np.ones((Y_tr_bc.shape[0],1))],axis=1)
###Output
_____no_output_____
###Markdown
Scaling the inputs (optional)
###Code
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler()
# X_tr_pde = scaler.fit_transform(X_tr_pde)
X_tr = np.concatenate((X_tr_pde, X_bc), axis=0)
Y_tr = np.concatenate((Y_tr_pde, Y_tr_bc), axis=0)
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler()
# X_tr = scaler.fit_transform(X_tr)
# X_tr.std()
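# quick sanity check: standardise the first few inputs using the mean/std values recorded in the
# commented-out note in the test-set scaling cell below (an alternative to calling StandardScaler)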
(X_tr[0:3] +0.0056276)/4.520744138916567
###Output
_____no_output_____
###Markdown
Defining the NN model (custom Keras model)- __Model specifications__: an input layer, 3 hidden layers with 20 neurons each, a dense output layer, a gradient layer, a Laplacian layer, and a PDE layer. - Output is a list of two elements: the value of the function and the value of the PDE operator.- Mean squared error is used for the cost function.- A specialized SGD (stochastic gradient descent) type optimizer is used: either Nadam or Adam.- tanh activation functions are used.
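In other words (a sketch of the objective that the two custom loss functions below implement): for the manufactured solution $u(x)=3\sin(4x)$ we have $f(x)=-u''(x)=48\sin(4x)$, so training minimises $\tfrac{1}{2}\sum_{x\in\{-1,1\}}\big(u_\theta(x)-3\sin(4x)\big)^2+\tfrac{1}{2}\sum_{x_i\in(-1,1)}\big(u_\theta''(x_i)+48\sin(4x_i)\big)^2$, where $u_\theta$ is the network output.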
###Code
from tensorflow.keras import backend as K
class Poisson1d(tf.keras.Model):
def __init__(self):
super(Poisson1d, self).__init__()
# self.batch_norm_ = keras.layers.BatchNormalization()
self.flatten_input = keras.layers.Flatten()
he_kernel_init = keras.initializers.he_uniform()
self.dense_1 = keras.layers.Dense(20, activation="tanh",
kernel_initializer=he_kernel_init,
name="dense_1")
self.dense_2 = keras.layers.Dense(20, activation="tanh",
kernel_initializer=he_kernel_init,
name="dense_2")
self.dense_3 = keras.layers.Dense(20, activation="tanh",
kernel_initializer=he_kernel_init,
name="dense_3")
self.dense_4 = keras.layers.Dense(1,
name="dense_4")
def findGrad(self,func,argm):
return keras.layers.Lambda(lambda x: K.gradients(x[0],x[1])[0]) ([func,argm])
def findPdeLayer(self, pde_lhs, input_arg):
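        # PDE residual u'' + 48*sin(4x): it vanishes when u solves -u'' = 48*sin(4x), i.e. u = 3*sin(4x)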
return keras.layers.Lambda(lambda z: z[0] + 48*tf.sin(4*z[1])) ([pde_lhs, input_arg])
def call(self, inputs):
# layer_0 = self.batch_norm_input(inputs)
layer_0 = self.flatten_input(inputs)
# layer_0_1 = self.batch_norm_input(layer_0)
layer_1 = self.dense_1(layer_0)
# layer_2_0 = self.batch_norm_input(layer_1)
layer_2 = self.dense_2(layer_1)
# layer_3_0 = self.batch_norm_(layer_2)
layer_3 = self.dense_3(layer_2)
layer_4 = self.dense_4(layer_3)
grad_layer = self.findGrad(layer_4, inputs)
laplace_layer = self.findGrad(grad_layer, inputs)
pde_layer = self.findPdeLayer(laplace_layer, inputs)
return layer_4, pde_layer
###Output
_____no_output_____
###Markdown
Defining the loss functions
###Code
#Loss coming from the boundary terms
def u_loss(y_true, y_pred):
y_true_act = y_true[:,:-1]
at_boundary = tf.cast(y_true[:,-1:,],bool)
u_sq_error = (1/2)*tf.square(y_true_act-y_pred)
return tf.where(at_boundary, u_sq_error, 0.)
#Loss coming from the PDE constrain
def pde_loss(y_true, y_pred):
y_true_act = y_true[:,:-1]
at_boundary = tf.cast(y_true[:,-1:,],bool)
    #y_pred is already the PDE residual returned by pde_layer, so we simply square it
# pde_sq_error = (1/2)*tf.square(y_true_act-y_pred)
pde_sq_error = (1/2)*tf.square(y_pred)
return tf.where(at_boundary,0.,pde_sq_error)
###Output
_____no_output_____
###Markdown
Instantiating and compiling the poisson_model
###Code
poisson_NN = Poisson1d()
poisson_NN.compile(loss=[u_loss,pde_loss],optimizer="adam")
poisson_NN.fit(x=X_tr, y=Y_tr,epochs=100)
###Output
Epoch 1/100
29/29 [==============================] - 0s 8ms/step - loss: 259.7652 - output_1_loss: 1.6987 - output_2_loss: 258.0666
Epoch 2/100
29/29 [==============================] - 0s 8ms/step - loss: 199.6366 - output_1_loss: 3.7078 - output_2_loss: 195.9288
Epoch 3/100
29/29 [==============================] - 0s 4ms/step - loss: 167.5798 - output_1_loss: 5.7012 - output_2_loss: 161.8786
Epoch 4/100
29/29 [==============================] - 0s 6ms/step - loss: 147.2352 - output_1_loss: 6.9477 - output_2_loss: 140.2874
Epoch 5/100
29/29 [==============================] - 0s 6ms/step - loss: 127.5192 - output_1_loss: 8.2436 - output_2_loss: 119.2756
Epoch 6/100
29/29 [==============================] - 0s 7ms/step - loss: 107.6846 - output_1_loss: 9.3374 - output_2_loss: 98.3473
Epoch 7/100
29/29 [==============================] - 0s 7ms/step - loss: 94.4738 - output_1_loss: 10.1162 - output_2_loss: 84.3576
Epoch 8/100
29/29 [==============================] - 0s 6ms/step - loss: 82.4624 - output_1_loss: 10.6387 - output_2_loss: 71.8237
Epoch 9/100
29/29 [==============================] - 0s 3ms/step - loss: 69.6333 - output_1_loss: 10.6447 - output_2_loss: 58.9887
Epoch 10/100
29/29 [==============================] - 0s 4ms/step - loss: 55.9477 - output_1_loss: 10.3795 - output_2_loss: 45.5682
Epoch 11/100
29/29 [==============================] - 0s 9ms/step - loss: 42.3476 - output_1_loss: 9.9878 - output_2_loss: 32.3598
Epoch 12/100
29/29 [==============================] - 0s 8ms/step - loss: 29.1121 - output_1_loss: 9.2851 - output_2_loss: 19.8270
Epoch 13/100
29/29 [==============================] - 0s 8ms/step - loss: 19.3638 - output_1_loss: 8.3920 - output_2_loss: 10.9718
Epoch 14/100
29/29 [==============================] - 1s 19ms/step - loss: 14.4945 - output_1_loss: 7.5555 - output_2_loss: 6.9389
Epoch 15/100
29/29 [==============================] - 0s 8ms/step - loss: 12.6046 - output_1_loss: 6.7571 - output_2_loss: 5.8475
Epoch 16/100
29/29 [==============================] - 0s 14ms/step - loss: 10.5798 - output_1_loss: 5.9927 - output_2_loss: 4.5870
Epoch 17/100
29/29 [==============================] - 0s 6ms/step - loss: 9.1434 - output_1_loss: 5.2864 - output_2_loss: 3.8570
Epoch 18/100
29/29 [==============================] - 0s 10ms/step - loss: 7.8388 - output_1_loss: 4.6431 - output_2_loss: 3.1957
Epoch 19/100
29/29 [==============================] - 0s 7ms/step - loss: 6.9969 - output_1_loss: 4.1099 - output_2_loss: 2.8870
Epoch 20/100
29/29 [==============================] - 0s 14ms/step - loss: 6.1960 - output_1_loss: 3.6754 - output_2_loss: 2.5206
Epoch 21/100
29/29 [==============================] - 0s 9ms/step - loss: 5.5211 - output_1_loss: 3.2757 - output_2_loss: 2.2454
Epoch 22/100
29/29 [==============================] - 0s 8ms/step - loss: 4.7912 - output_1_loss: 2.8491 - output_2_loss: 1.9420
Epoch 23/100
29/29 [==============================] - 0s 8ms/step - loss: 4.2952 - output_1_loss: 2.4328 - output_2_loss: 1.8624
Epoch 24/100
29/29 [==============================] - 0s 10ms/step - loss: 3.6080 - output_1_loss: 2.0475 - output_2_loss: 1.5605
Epoch 25/100
29/29 [==============================] - 0s 12ms/step - loss: 2.9790 - output_1_loss: 1.7081 - output_2_loss: 1.2709
Epoch 26/100
29/29 [==============================] - 1s 19ms/step - loss: 2.4628 - output_1_loss: 1.4023 - output_2_loss: 1.0605
Epoch 27/100
29/29 [==============================] - 0s 11ms/step - loss: 2.0742 - output_1_loss: 1.1864 - output_2_loss: 0.8878
Epoch 28/100
29/29 [==============================] - 0s 9ms/step - loss: 1.7676 - output_1_loss: 1.0167 - output_2_loss: 0.7509
Epoch 29/100
29/29 [==============================] - 0s 9ms/step - loss: 1.5395 - output_1_loss: 0.8897 - output_2_loss: 0.6498
Epoch 30/100
29/29 [==============================] - 0s 10ms/step - loss: 1.3731 - output_1_loss: 0.7909 - output_2_loss: 0.5822
Epoch 31/100
29/29 [==============================] - 0s 8ms/step - loss: 1.2571 - output_1_loss: 0.7032 - output_2_loss: 0.5539
Epoch 32/100
29/29 [==============================] - 0s 8ms/step - loss: 1.0477 - output_1_loss: 0.6191 - output_2_loss: 0.4286
Epoch 33/100
29/29 [==============================] - 0s 10ms/step - loss: 0.9775 - output_1_loss: 0.5610 - output_2_loss: 0.4165
Epoch 34/100
29/29 [==============================] - 0s 6ms/step - loss: 0.8646 - output_1_loss: 0.5001 - output_2_loss: 0.3645
Epoch 35/100
29/29 [==============================] - 0s 7ms/step - loss: 0.8120 - output_1_loss: 0.4500 - output_2_loss: 0.3620
Epoch 36/100
29/29 [==============================] - 0s 9ms/step - loss: 0.7423 - output_1_loss: 0.4108 - output_2_loss: 0.3315
Epoch 37/100
29/29 [==============================] - 0s 13ms/step - loss: 0.6386 - output_1_loss: 0.3692 - output_2_loss: 0.2693
Epoch 38/100
29/29 [==============================] - 0s 17ms/step - loss: 0.5728 - output_1_loss: 0.3305 - output_2_loss: 0.2423
Epoch 39/100
29/29 [==============================] - 0s 12ms/step - loss: 0.5022 - output_1_loss: 0.2914 - output_2_loss: 0.2108
Epoch 40/100
29/29 [==============================] - 0s 8ms/step - loss: 0.4589 - output_1_loss: 0.2579 - output_2_loss: 0.2010
Epoch 41/100
29/29 [==============================] - 0s 12ms/step - loss: 0.4043 - output_1_loss: 0.2306 - output_2_loss: 0.1737
Epoch 42/100
29/29 [==============================] - 0s 8ms/step - loss: 0.3789 - output_1_loss: 0.2028 - output_2_loss: 0.1761
Epoch 43/100
29/29 [==============================] - 0s 8ms/step - loss: 0.3445 - output_1_loss: 0.1821 - output_2_loss: 0.1624
Epoch 44/100
29/29 [==============================] - 1s 20ms/step - loss: 0.3695 - output_1_loss: 0.1614 - output_2_loss: 0.2081
Epoch 45/100
29/29 [==============================] - 0s 8ms/step - loss: 0.2772 - output_1_loss: 0.1431 - output_2_loss: 0.1341
Epoch 46/100
29/29 [==============================] - 0s 8ms/step - loss: 0.2570 - output_1_loss: 0.1235 - output_2_loss: 0.1336
Epoch 47/100
29/29 [==============================] - 0s 9ms/step - loss: 0.2372 - output_1_loss: 0.1082 - output_2_loss: 0.1290
Epoch 48/100
29/29 [==============================] - 0s 11ms/step - loss: 0.2071 - output_1_loss: 0.0952 - output_2_loss: 0.1119
Epoch 49/100
29/29 [==============================] - 0s 16ms/step - loss: 0.1887 - output_1_loss: 0.0816 - output_2_loss: 0.1071
Epoch 50/100
29/29 [==============================] - 0s 9ms/step - loss: 0.1826 - output_1_loss: 0.0705 - output_2_loss: 0.1121
Epoch 51/100
29/29 [==============================] - 0s 8ms/step - loss: 0.1690 - output_1_loss: 0.0628 - output_2_loss: 0.1063
Epoch 52/100
29/29 [==============================] - 0s 8ms/step - loss: 0.1603 - output_1_loss: 0.0537 - output_2_loss: 0.1066
Epoch 53/100
29/29 [==============================] - 0s 13ms/step - loss: 0.1697 - output_1_loss: 0.0455 - output_2_loss: 0.1243
Epoch 54/100
29/29 [==============================] - 0s 12ms/step - loss: 0.1362 - output_1_loss: 0.0394 - output_2_loss: 0.0968
Epoch 55/100
29/29 [==============================] - 0s 14ms/step - loss: 0.1200 - output_1_loss: 0.0342 - output_2_loss: 0.0857
Epoch 56/100
29/29 [==============================] - 0s 9ms/step - loss: 0.1060 - output_1_loss: 0.0292 - output_2_loss: 0.0768
Epoch 57/100
29/29 [==============================] - 0s 11ms/step - loss: 0.1285 - output_1_loss: 0.0248 - output_2_loss: 0.1037
Epoch 58/100
29/29 [==============================] - 0s 10ms/step - loss: 0.0924 - output_1_loss: 0.0206 - output_2_loss: 0.0718
Epoch 59/100
29/29 [==============================] - 0s 13ms/step - loss: 0.0891 - output_1_loss: 0.0174 - output_2_loss: 0.0717
Epoch 60/100
29/29 [==============================] - 0s 5ms/step - loss: 0.0812 - output_1_loss: 0.0147 - output_2_loss: 0.0664
Epoch 61/100
29/29 [==============================] - 0s 9ms/step - loss: 0.0689 - output_1_loss: 0.0127 - output_2_loss: 0.0562
Epoch 62/100
29/29 [==============================] - 0s 5ms/step - loss: 0.0796 - output_1_loss: 0.0104 - output_2_loss: 0.0691
Epoch 63/100
29/29 [==============================] - 0s 10ms/step - loss: 0.0631 - output_1_loss: 0.0085 - output_2_loss: 0.0546
Epoch 64/100
###Markdown
Testing the trained network
###Code
X_test_st = np.random.uniform(-1,1,100).reshape(100,1)
###Output
_____no_output_____
###Markdown
Scaling the test set (only if the training data was scaled)
###Code
# #Scaling test set
# X_test_st_2 = scaler.transform(X_test_st)
#xtrain: mean, std: -0.005627660222786496 4.520744138916567
Y_test = poisson_NN.predict(X_test_st)
###Output
_____no_output_____
###Markdown
Plotting the true and predicted solutions
###Code
# fig, ax = plt.subplots(nrows=2,ncols=2, figsize=(10,10))
#plotting predicted solution
plt.figure(figsize=(15,7))
plt.subplot(1,2,1)
plt.scatter(X_test_st, Y_test[0][:,0])
plt.title("Predicted solution")
plt.subplot(1,2,2)
plt.scatter(X_test_st, 3*np.sin(4*X_test_st), c="r")
plt.title("True solution")
#True vs predicted solution
plt.figure(figsize=(10,5))
plt.scatter(3*np.sin(4*X_test_st), Y_test[0][:,0], c="g")
plt.title("True solution vs predicted solution")
plt.xlabel("True solution")
plt.ylabel("Predicted solution")
plt.show()
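# optional sanity check (a minimal sketch): relative L2 error of the prediction,
# assuming Y_test[0][:, 0] holds the predicted u values as used above
u_true = 3*np.sin(4*X_test_st).ravel()
u_pred = Y_test[0][:, 0]
print("Relative L2 error:", np.linalg.norm(u_pred - u_true)/np.linalg.norm(u_true))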
###Output
_____no_output_____ |
nb_ex2_1_ann_mnist_cl.ipynb | ###Markdown
2.2 Implementing a classification ANN for handwritten digit recognition. A classification ANN is an AI method that classifies data with two or more classes. 2.2.1 Implementing the AI model for the classification ANN. Here we cover steps 1 through 3 of implementing the classification ANN. 1. Import two modules from the Keras package.
###Code
from keras import layers, models
###Output
_____no_output_____
###Markdown
2. Implement the ANN model in the distributed (functional) style.
###Code
def ANN_models_func(Nin, Nh, Nout):
x = layers.Input(shape=(Nin,))
h = layers.Activation('relu')(layers.Dense(Nh)(x))
y = layers.Activation('softmax')(layers.Dense(Nout)(h))
model = models.Model(x, y)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
###Output
_____no_output_____
###Markdown
3. Learn how to implement the ANN model in the chained (Sequential) style.
###Code
def ANN_seq_func(Nin, Nh, Nout):
model = models.Sequential()
model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
model.add(layers.Dense(Nout, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
return model
###Output
_____no_output_____
###Markdown
4. To increase the reusability of the ANN code, it can also be implemented in an object-oriented style.
###Code
class ANN_models_class(models.Model):
def __init__(self, Nin, Nh, Nout):
# Prepare network layers and activate functions
hidden = layers.Dense(Nh)
output = layers.Dense(Nout)
relu = layers.Activation('relu')
softmax = layers.Activation('softmax')
# Connect network elements
x = layers.Input(shape=(Nin,))
h = relu(hidden(x))
y = softmax(output(h))
super().__init__(x, y)
self.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
5. Previously each layer was defined separately; using the chained (Sequential) style makes the modeling more convenient.
###Code
class ANN_seq_class(models.Sequential):
def __init__(self, Nin, Nh, Nout):
super().__init__()
self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
self.add(layers.Dense(Nout, activation='softmax'))
self.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
2.2.2 Loading the data for the classification ANN 6. Learn how to load the data to be processed by the AI model.
###Code
import numpy as np
from keras import datasets # mnist
from keras.utils import np_utils # to_categorical
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
L, H, W = X_train.shape
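    # L images, each H x W = 28 x 28 pixels; flattened below into length-784 vectors (= Nin later)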
X_train = X_train.reshape(-1, W * H)
X_test = X_test.reshape(-1, W * H)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
###Output
_____no_output_____
###Markdown
2.2.3 Plotting the classification ANN training results 7. Learn how to analyze the ANN training results. - Import pyplot, the plotting library, from the matplotlib package and rename it plt.
###Code
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
- The function that plots the loss is as follows.
###Code
# from keraspp.skeras import plot_loss
def plot_loss(history, title=None):
if not isinstance(history, dict):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if title is not None:
plt.title(title)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(['Training', 'Validation'], loc=0)
###Output
_____no_output_____
###Markdown
- Next is the function that plots the accuracy.
###Code
# from keraspp.skeras import plot_acc
def plot_acc(history, title=None):
if not isinstance(history, dict):
history = history.history
plt.plot(history['accuracy'])
plt.plot(history['val_accuracy'])
if title is not None:
plt.title(title)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(['Training', 'Validation'], loc=0)
###Output
_____no_output_____
###Markdown
2.2.4 Training the classification ANN and analyzing its performance 8. Define the parameters used by the ANN. - Define the four parameters used by the ANN.
###Code
Nin = 784
Nh = 100
number_of_class = 10
Nout = number_of_class
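# Nin: 28*28 = 784 input pixels, Nh: 100 hidden units, Nout: 10 digit classes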
###Output
_____no_output_____
###Markdown
- Create an instance of the model and load the data.
###Code
# model = ANN_models_func(Nin, Nh, Nout)
# model = ANN_seq_func(Nin, Nh, Nout)
# model = ANN_models_class(Nin, Nh, Nout)
model = ANN_seq_class(Nin, Nh, Nout)
(X_train, Y_train), (X_test, Y_test) = Data_func()
###Output
2021-09-23 17:43:53.247579: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1
2021-09-23 17:43:53.632230: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:53.632502: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: NVIDIA GeForce GTX 1050 computeCapability: 6.1
coreClock: 1.455GHz coreCount: 5 deviceMemorySize: 2.00GiB deviceMemoryBandwidth: 104.43GiB/s
2021-09-23 17:43:53.636211: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2021-09-23 17:43:53.703323: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10
2021-09-23 17:43:53.747754: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10
2021-09-23 17:43:53.765934: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10
2021-09-23 17:43:53.838488: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10
2021-09-23 17:43:53.851999: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10
2021-09-23 17:43:53.979091: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2021-09-23 17:43:53.980027: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:53.981101: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:53.981412: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1703] Adding visible gpu devices: 0
2021-09-23 17:43:53.982565: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2021-09-23 17:43:54.035395: I tensorflow/core/platform/profile_utils/cpu_utils.cc:102] CPU Frequency: 3599995000 Hz
2021-09-23 17:43:54.047349: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55cb9b690930 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-09-23 17:43:54.047396: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version
2021-09-23 17:43:54.050270: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:54.050578: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: NVIDIA GeForce GTX 1050 computeCapability: 6.1
coreClock: 1.455GHz coreCount: 5 deviceMemorySize: 2.00GiB deviceMemoryBandwidth: 104.43GiB/s
2021-09-23 17:43:54.050626: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2021-09-23 17:43:54.050642: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10
2021-09-23 17:43:54.050656: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10
2021-09-23 17:43:54.050669: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10
2021-09-23 17:43:54.050683: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10
2021-09-23 17:43:54.050696: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10
2021-09-23 17:43:54.050712: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7
2021-09-23 17:43:54.051316: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:54.052101: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:54.052285: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1703] Adding visible gpu devices: 0
2021-09-23 17:43:54.053000: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2021-09-23 17:43:54.594489: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1102] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-09-23 17:43:54.594516: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1108] 0
2021-09-23 17:43:54.594555: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1121] 0: N
2021-09-23 17:43:54.595692: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:54.595911: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1330] Could not identify NUMA node of platform GPU id 0, defaulting to 0. Your kernel may not have been built with NUMA support.
2021-09-23 17:43:54.596532: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:54.597299: E tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:967] could not open file to read NUMA node: /sys/bus/pci/devices/0000:01:00.0/numa_node
Your kernel may have been built without NUMA support.
2021-09-23 17:43:54.597725: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1247] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 1421 MB memory) -> physical GPU (device: 0, name: NVIDIA GeForce GTX 1050, pci bus id: 0000:01:00.0, compute capability: 6.1)
2021-09-23 17:43:54.604408: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55cb9f6441d0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2021-09-23 17:43:54.604427: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): NVIDIA GeForce GTX 1050, Compute Capability 6.1
###Markdown
- Learn how to train the model using the created instance and the loaded data.
###Code
history = model.fit(X_train, Y_train, epochs=5, batch_size=100, validation_split=0.2)
###Output
2021-09-23 17:44:02.248997: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 376320000 exceeds 10% of free system memory.
2021-09-23 17:44:04.484273: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 376320000 exceeds 10% of free system memory.
2021-09-23 17:44:04.795776: W tensorflow/core/framework/cpu_allocator_impl.cc:81] Allocation of 301056000 exceeds 10% of free system memory.
###Markdown
- Look at the final performance evaluated on data that was not used for training or validation (X_test, y_test).
###Code
performace_test = model.evaluate(X_test, Y_test, batch_size=100)
print('Test Loss and Accuracy ->', performace_test)
###Output
29/100 [=======>......................] - ETA: 0s - loss: 0.1400 - accuracy: 0.9566
###Markdown
- Plot the loss and accuracy curves.
###Code
plot_loss(history)
plt.show()
plot_acc(history)
plt.show()
###Output
_____no_output_____
###Markdown
--- 2.2.5 Full code
###Code
# file: ex2_1_ann_mnist_cl.py
# 1. AI model implementation for the classification ANN
from keras import layers, models
# 2. Functional implementation with distributed-style modeling
def ANN_models_func(Nin, Nh, Nout):
x = layers.Input(shape=(Nin,))
h = layers.Activation('relu')(layers.Dense(Nh)(x))
y = layers.Activation('softmax')(layers.Dense(Nout)(h))
model = models.Model(x, y)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# 3. Functional implementation with chained (Sequential) modeling
def ANN_seq_func(Nin, Nh, Nout):
model = models.Sequential()
model.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
model.add(layers.Dense(Nout, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
return model
# 4. Object-oriented implementation with distributed-style modeling
class ANN_models_class(models.Model):
def __init__(self, Nin, Nh, Nout):
# Prepare network layers and activate functions
hidden = layers.Dense(Nh)
output = layers.Dense(Nout)
relu = layers.Activation('relu')
softmax = layers.Activation('softmax')
# Connect network elements
x = layers.Input(shape=(Nin,))
h = relu(hidden(x))
y = softmax(output(h))
super().__init__(x, y)
self.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
# 5. Object-oriented implementation with chained (Sequential) modeling
class ANN_seq_class(models.Sequential):
def __init__(self, Nin, Nh, Nout):
super().__init__()
self.add(layers.Dense(Nh, activation='relu', input_shape=(Nin,)))
self.add(layers.Dense(Nout, activation='softmax'))
self.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
# 6. Load the data for the classification ANN
import numpy as np
from keras import datasets # mnist
from keras.utils import np_utils # to_categorical
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
L, H, W = X_train.shape
X_train = X_train.reshape(-1, W * H)
X_test = X_test.reshape(-1, W * H)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
# 7. Plot the classification ANN training results
import matplotlib.pyplot as plt
from keraspp.skeras import plot_loss, plot_acc
# 8. Train the classification ANN and analyze performance
def main():
Nin = 784
Nh = 100
number_of_class = 10
Nout = number_of_class
# model = ANN_models_func(Nin, Nh, Nout)
# model = ANN_seq_func(Nin, Nh, Nout)
# model = ANN_models_class(Nin, Nh, Nout)
model = ANN_seq_class(Nin, Nh, Nout)
(X_train, Y_train), (X_test, Y_test) = Data_func()
history = model.fit(X_train, Y_train, epochs=5, batch_size=100, validation_split=0.2)
performace_test = model.evaluate(X_test, Y_test, batch_size=100)
print('Test Loss and Accuracy ->', performace_test)
plot_loss(history)
plt.show()
plot_acc(history)
plt.show()
main()
###Output
Epoch 1/5
480/480 [==============================] - 2s 5ms/step - loss: 0.3895 - accuracy: 0.8926 - val_loss: 0.2169 - val_accuracy: 0.9393
Epoch 2/5
480/480 [==============================] - 2s 5ms/step - loss: 0.1900 - accuracy: 0.9463 - val_loss: 0.1638 - val_accuracy: 0.9528
Epoch 3/5
480/480 [==============================] - 2s 5ms/step - loss: 0.1431 - accuracy: 0.9585 - val_loss: 0.1367 - val_accuracy: 0.9614
Epoch 4/5
480/480 [==============================] - 2s 5ms/step - loss: 0.1131 - accuracy: 0.9674 - val_loss: 0.1218 - val_accuracy: 0.9645
Epoch 5/5
480/480 [==============================] - 2s 5ms/step - loss: 0.0917 - accuracy: 0.9735 - val_loss: 0.1117 - val_accuracy: 0.9672
100/100 [==============================] - 0s 3ms/step - loss: 0.1004 - accuracy: 0.9704
Test Loss and Accuracy -> [0.10043905675411224, 0.9703999757766724]
|
Simple_Audio_Recognition.ipynb | ###Markdown
Simple audio recognition: Recognizing keywords
###Code
import os
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import models
from IPython import display
# Set seed for experiment reproducibility
seed = 42
tf.random.set_seed(seed)
np.random.seed(seed)
###Output
_____no_output_____
###Markdown
**Import the Speech Commands dataset**
###Code
data_dir = pathlib.Path('data/mini_speech_commands')
if not data_dir.exists():
tf.keras.utils.get_file(
'mini_speech_commands.zip',
origin="http://storage.googleapis.com/download.tensorflow.org/data/mini_speech_commands.zip",
extract=True,
cache_dir='.', cache_subdir='data')
commands = np.array(tf.io.gfile.listdir(str(data_dir)))
commands = commands[commands != 'README.md']
print('Commands:', commands)
###Output
Commands: ['down' 'yes' 'go' 'no' 'up' 'right' 'stop' 'left']
###Markdown
Now we will extract the audio files into a list and shuffle it.
###Code
filenames = tf.io.gfile.glob(str(data_dir) + '/*/*')
filenames = tf.random.shuffle(filenames)
num_samples = len(filenames)
print('Number of total examples:', num_samples)
print('Number of examples per label:',
len(tf.io.gfile.listdir(str(data_dir/commands[0]))))
print('Example file tensor:', filenames[0])
train_files = filenames[:6400]
val_files = filenames[6400: 6400 + 800]
test_files = filenames[-800:]
print('Training set size', len(train_files))
print('Validation set size', len(val_files))
print('Test set size', len(test_files))
###Output
Training set size 6400
Validation set size 800
Test set size 800
###Markdown
**Reading audio files and their labels**
###Code
def decode_audio(audio_binary):
audio, _ = tf.audio.decode_wav(audio_binary)
return tf.squeeze(audio, axis=-1)
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
# Note: You'll use indexing here instead of tuple unpacking to enable this
# to work in a TensorFlow graph.
return parts[-2]
def get_waveform_and_label(file_path):
label = get_label(file_path)
audio_binary = tf.io.read_file(file_path)
waveform = decode_audio(audio_binary)
return waveform, label
AUTOTUNE = tf.data.AUTOTUNE
files_ds = tf.data.Dataset.from_tensor_slices(train_files)
waveform_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
rows = 3
cols = 3
n = rows*cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 12))
for i, (audio, label) in enumerate(waveform_ds.take(n)):
r = i // cols
c = i % cols
ax = axes[r][c]
ax.plot(audio.numpy())
ax.set_yticks(np.arange(-1.2, 1.2, 0.2))
label = label.numpy().decode('utf-8')
ax.set_title(label)
plt.show()
###Output
_____no_output_____
###Markdown
Spectrogram
###Code
def get_spectrogram(waveform):
# Padding for files with less than 16000 samples
zero_padding = tf.zeros([16000] - tf.shape(waveform), dtype=tf.float32)
# Concatenate audio with padding so that all audio clips will be of the
# same length
waveform = tf.cast(waveform, tf.float32)
equal_length = tf.concat([waveform, zero_padding], 0)
spectrogram = tf.signal.stft(
equal_length, frame_length=255, frame_step=128)
spectrogram = tf.abs(spectrogram)
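  # magnitude spectrogram; with 16000 samples, frame_length=255 and frame_step=128 give shape (124, 129)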
return spectrogram
for waveform, label in waveform_ds.take(1):
label = label.numpy().decode('utf-8')
spectrogram = get_spectrogram(waveform)
print('Label:', label)
print('Waveform shape:', waveform.shape)
print('Spectrogram shape:', spectrogram.shape)
print('Audio playback')
display.display(display.Audio(waveform, rate=16000))
def plot_spectrogram(spectrogram, ax):
# Convert to frequencies to log scale and transpose so that the time is
# represented in the x-axis (columns). An epsilon is added to avoid log of zero.
log_spec = np.log(spectrogram.T+np.finfo(float).eps)
height = log_spec.shape[0]
width = log_spec.shape[1]
X = np.linspace(0, np.size(spectrogram), num=width, dtype=int)
Y = range(height)
ax.pcolormesh(X, Y, log_spec)
fig, axes = plt.subplots(2, figsize=(12, 8))
timescale = np.arange(waveform.shape[0])
axes[0].plot(timescale, waveform.numpy())
axes[0].set_title('Waveform')
axes[0].set_xlim([0, 16000])
plot_spectrogram(spectrogram.numpy(), axes[1])
axes[1].set_title('Spectrogram')
plt.show()
def get_spectrogram_and_label_id(audio, label):
spectrogram = get_spectrogram(audio)
spectrogram = tf.expand_dims(spectrogram, -1)
label_id = tf.argmax(label == commands)
return spectrogram, label_id
spectrogram_ds = waveform_ds.map(
get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE)
rows = 3
cols = 3
n = rows*cols
fig, axes = plt.subplots(rows, cols, figsize=(10, 10))
for i, (spectrogram, label_id) in enumerate(spectrogram_ds.take(n)):
r = i // cols
c = i % cols
ax = axes[r][c]
plot_spectrogram(np.squeeze(spectrogram.numpy()), ax)
ax.set_title(commands[label_id.numpy()])
ax.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Build and train the model
###Code
def preprocess_dataset(files):
files_ds = tf.data.Dataset.from_tensor_slices(files)
output_ds = files_ds.map(get_waveform_and_label, num_parallel_calls=AUTOTUNE)
output_ds = output_ds.map(
get_spectrogram_and_label_id, num_parallel_calls=AUTOTUNE)
return output_ds
train_ds = spectrogram_ds
val_ds = preprocess_dataset(val_files)
test_ds = preprocess_dataset(test_files)
batch_size = 64
train_ds = train_ds.batch(batch_size)
val_ds = val_ds.batch(batch_size)
train_ds = train_ds.cache().prefetch(AUTOTUNE)
val_ds = val_ds.cache().prefetch(AUTOTUNE)
for spectrogram, _ in spectrogram_ds.take(1):
input_shape = spectrogram.shape
print('Input shape:', input_shape)
num_labels = len(commands)
norm_layer = layers.Normalization()
norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))
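# adapt() computes the mean and variance of the training spectrograms for the Normalization layer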
model = models.Sequential([
layers.Input(shape=input_shape),
layers.Resizing(32, 32),
norm_layer,
layers.Conv2D(32, 3, activation='relu'),
layers.Conv2D(64, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.25),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dropout(0.5),
layers.Dense(num_labels),
])
model.summary()
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'],
)
EPOCHS = 10
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=EPOCHS,
callbacks=tf.keras.callbacks.EarlyStopping(verbose=1, patience=2),
)
metrics = history.history
plt.plot(history.epoch, metrics['loss'], metrics['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.show()
###Output
_____no_output_____
###Markdown
**Evaluate test set performance**
###Code
test_audio = []
test_labels = []
for audio, label in test_ds:
test_audio.append(audio.numpy())
test_labels.append(label.numpy())
test_audio = np.array(test_audio)
test_labels = np.array(test_labels)
y_pred = np.argmax(model.predict(test_audio), axis=1)
y_true = test_labels
test_acc = sum(y_pred == y_true) / len(y_true)
print(f'Test set accuracy: {test_acc:.0%}')
###Output
Test set accuracy: 84%
###Markdown
**Display a confusion matrix**
###Code
confusion_mtx = tf.math.confusion_matrix(y_true, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(confusion_mtx, xticklabels=commands, yticklabels=commands,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
###Output
_____no_output_____
###Markdown
**Run inference on an audio file**
###Code
sample_file = data_dir/'no/01bb6a2a_nohash_0.wav'
sample_ds = preprocess_dataset([str(sample_file)])
for spectrogram, label in sample_ds.batch(1):
prediction = model(spectrogram)
plt.bar(commands, tf.nn.softmax(prediction[0]))
plt.title(f'Predictions for "{commands[label[0]]}"')
plt.show()
###Output
_____no_output_____ |
nbs/course2020/vision/02_MNIST.ipynb | ###Markdown
Lesson 2 - Image Classification Models from Scratch Lesson Video:
###Code
#hide_input
from IPython.lib.display import YouTubeVideo
YouTubeVideo('_SKqrTlXNt8')
#hide
#Run once per session
!pip install fastai wwf -q --upgrade
#hide_input
from wwf.utils import state_versions
state_versions(['fastai', 'fastcore', 'wwf'])
###Output
_____no_output_____
###Markdown
Grab our vision related libraries
###Code
from fastai.vision.all import *
###Output
_____no_output_____
###Markdown
Below you will find the exact imports for everything we use today
###Code
from torch import nn
from fastai.callback.hook import summary
from fastai.callback.schedule import fit_one_cycle, lr_find
from fastai.callback.progress import ProgressCallback
from fastai.data.core import Datasets, DataLoaders, show_at
from fastai.data.external import untar_data, URLs
from fastai.data.transforms import Categorize, GrandparentSplitter, parent_label, ToTensor, IntToFloatTensor, Normalize
from fastai.layers import Flatten
from fastai.learner import Learner
from fastai.metrics import accuracy, CrossEntropyLossFlat
from fastai.vision.augment import CropPad, RandomCrop, PadMode
from fastai.vision.core import PILImageBW
from fastai.vision.utils import get_image_files
###Output
_____no_output_____
###Markdown
And our data
###Code
path = untar_data(URLs.MNIST)
###Output
_____no_output_____
###Markdown
Working with the data
###Code
items = get_image_files(path)
items[0]
###Output
_____no_output_____
###Markdown
Create an image object. Done automatically with `ImageBlock`.
###Code
im = PILImageBW.create(items[0])
im.show()
###Output
_____no_output_____
###Markdown
Split our data with `GrandparentSplitter`, which will make use of a `train` and `valid` folder.
###Code
splits = GrandparentSplitter(train_name='training', valid_name='testing')
items[:3]
###Output
_____no_output_____
###Markdown
Splits need to be applied to some items
###Code
splits = splits(items)
splits[0][:5], splits[1][:5]
###Output
_____no_output_____
###Markdown
* Make a `Datasets`* Expects items, transforms for describing our problem, and a splitting method
###Code
dsrc = Datasets(items, tfms=[[PILImageBW.create], [parent_label, Categorize]],
splits=splits)
###Output
_____no_output_____
###Markdown
We can look at an item in our `Datasets` with `show_at`
###Code
show_at(dsrc.train, 3)
###Output
_____no_output_____
###Markdown
We can see that it's a `PILImage` of a three, along with a label of `3`. Next we need to give ourselves some transforms on the data! These will need to: 1. Ensure our images are all the same size 2. Make sure our outputs are the tensors our models expect 3. Give some image augmentation
###Code
tfms = [ToTensor(), CropPad(size=34, pad_mode=PadMode.Zeros), RandomCrop(size=28)]
###Output
_____no_output_____
###Markdown
* `ToTensor`: Converts to tensor* `CropPad` and `RandomCrop`: Resizing transforms* Applied on the `CPU` via `after_item`
###Code
gpu_tfms = [IntToFloatTensor(), Normalize()]
###Output
_____no_output_____
###Markdown
* `IntToFloatTensor`: Converts to a float* `Normalize`: Normalizes data
###Code
dls = dsrc.dataloaders(bs=128, after_item=tfms, after_batch=gpu_tfms)
###Output
_____no_output_____
###Markdown
And show a batch
###Code
dls.show_batch()
###Output
_____no_output_____
###Markdown
From here we need to see what our model will expect
###Code
xb, yb = dls.one_batch()
###Output
_____no_output_____
###Markdown
And now the shapes:
###Code
xb.shape, yb.shape
dls.c
###Output
_____no_output_____
###Markdown
So our input shape will be a [128 x 1 x 28 x 28] and our output shape will be a [128] tensor that we need to condense into 10 classes. The Model: Our models are made up of **layers**, and each layer represents a matrix multiplication to end up with our final `y`. For this image problem, we will use a **Convolutional layer**, a **Batch Normalization layer**, an **Activation Function**, and a **Flattening layer**. Convolutional Layer: These are always the first layer in our network. I will be borrowing an analogy from [here](https://adeshpande3.github.io/A-Beginner%27s-Guide-To-Understanding-Convolutional-Neural-Networks/) by Adit Deshpande. Our example Convolutional layer will be 5x5x1. Imagine a flashlight that is shining over the top left of an image, which covers a 5x5 section of pixels at one given moment. This flashlight then slides across our pixels, covering every area of the picture. This flashlight is called a **filter**, which can also be called a **neuron** or **kernel**. The region it is currently looking over is called a **receptive field**. This filter is also an array of numbers called **weights** (or **parameters**). The depth of this filter **must** be the same as the depth of our input. In our case it is 1 (in a color image this is 3). Now once this filter begins moving (or **convolving**) around the image, it multiplies the values inside the filter with the original pixel values of our image (also called **element-wise multiplications**). These are then summed up (in our case this is just one multiplication of 28x28) to an individual value, which is a representation of **just** the top left of our image. Now repeat this until every unique location has a number, and we get what is called an **activation** or **feature map**. This feature map will have 784 different locations, which turns into a 28x28 array
###Code
def conv(ni, nf): return nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1)
###Output
_____no_output_____
###Markdown
Here we can see our `ni` is equivalent to the depth of the filter, and `nf` is equivalent to how many filters we will be using. (Fun fact: this always has to be divisible by the size of our image.) Batch Normalization: As we send our tensors through our model, it is important to normalize our data throughout the network. Doing so can allow for a much larger improvement in training speed, along with allowing each layer to learn independently (as each layer is then re-normalized according to its outputs)
###Code
def bn(nf): return nn.BatchNorm2d(nf)
###Output
_____no_output_____
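###Markdown
As a tiny illustration of that effect (an added check, not part of the original notebook; `acts` and `normed` are throwaway names), a freshly created `BatchNorm2d` layer in training mode rescales a batch of activations to roughly zero mean and unit variance per channel:
###Code
# minimal sketch: BatchNorm2d re-centres and re-scales a batch of activations
acts = torch.randn(64, 8, 14, 14) * 5 + 3   # roughly mean 3, std 5
normed = bn(8)(acts)
acts.mean().item(), normed.mean().item(), normed.std().item()
###Output
_____no_output_____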
###Markdown
`nf` will be the same as the filter output from our previous convolutional layer. Activation functions: They give our models non-linearity and work with the `weights` we mentioned earlier, along with a `bias`, through a process called **back-propagation**. These allow our models to learn and perform more complex tasks because they can choose to fire or activate one of those neurons mentioned earlier. In a simple sense, let's look at the `ReLU` activation function. It operates by turning any negative values to zero, as visualized below. From "A Practical Guide to ReLU" by Danqing Liu [URL](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7).
###Code
def ReLU(): return nn.ReLU(inplace=False)
###Output
_____no_output_____
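###Markdown
As a quick check of that behaviour (an added cell, not from the original lesson), passing a small tensor through our `ReLU` shows negative values being clipped to zero while positives pass through unchanged:
###Code
# minimal sketch: ReLU zeroes the negatives and keeps the positives
ReLU()(torch.tensor([-2.0, -0.5, 0.0, 1.5, 3.0]))
###Output
_____no_output_____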
###Markdown
Flattening: The last bit we need to do is take all these activations and the resulting matrix and flatten them into a single dimension of predictions. We do this with a `Flatten()` module
###Code
Flatten??
###Output
_____no_output_____
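###Markdown
To make the flattening step concrete (an added sketch), `Flatten` collapses the trailing spatial dimensions so each item in the batch becomes a single vector of predictions, which is exactly what the final linear or loss layer expects:
###Code
# minimal sketch: a [128 x 10 x 1 x 1] activation map becomes a [128 x 10] matrix
Flatten()(torch.randn(128, 10, 1, 1)).shape
###Output
_____no_output_____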
###Markdown
Making a Model* Five convolutional layers* `nn.Sequential`* 1 -> 32 -> 10
###Code
model = nn.Sequential(
conv(1, 8),
bn(8),
ReLU(),
conv(8, 16),
bn(16),
ReLU(),
conv(16,32),
bn(32),
ReLU(),
conv(32, 16),
bn(16),
ReLU(),
conv(16, 10),
bn(10),
Flatten()
)
###Output
_____no_output_____
###Markdown
Now let's make our `Learner`
###Code
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
###Output
_____no_output_____
###Markdown
We can then also call `learn.summary` to take a look at all the sizes with their **exact** output shapes
###Code
learn.summary()
###Output
_____no_output_____
###Markdown
`learn.summary` also tells us:* Total parameters* Trainable parameters* Optimizer* Loss function* Applied `Callbacks`
###Code
learn.lr_find()
###Output
_____no_output_____
###Markdown
Let's use a learning rate around 1e-1 (0.1)
###Code
learn.fit_one_cycle(3, lr_max=1e-1)
###Output
_____no_output_____
###Markdown
Simplify it * Try to make it more like `ResNet`.* `ConvLayer` contains a `Conv2d`, `BatchNorm2d`, and an activation function
###Code
def conv2(ni, nf): return ConvLayer(ni, nf, stride=2)
###Output
_____no_output_____
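###Markdown
If you want to confirm what `ConvLayer` bundles together (an added check, relying on the fastai `ConvLayer` brought in by the star import above), instantiating one and letting the notebook display it shows the `Conv2d`, `BatchNorm2d` and activation stacked in a single module:
###Code
# minimal sketch: one ConvLayer already wraps Conv2d + BatchNorm2d + an activation
conv2(1, 8)
###Output
_____no_output_____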
###Markdown
And make a new model
###Code
net = nn.Sequential(
conv2(1,8),
conv2(8,16),
conv2(16,32),
conv2(32,16),
conv2(16,10),
Flatten()
)
###Output
_____no_output_____
###Markdown
Great! That looks much better to read! Let's make sure we get (roughly) the same results with it.
###Code
learn = Learner(dls, net, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(3, lr_max=1e-1)
###Output
_____no_output_____
###Markdown
Almost the exact same! Perfect! Now let's get a bit more advanced. ResNet (kinda): The ResNet architecture is built with what are known as ResBlocks. Each of these blocks consists of two of the `ConvLayers` that we made before, where the number of filters does not change. Let's generate these layers.
###Code
class ResBlock(Module):
def __init__(self, nf):
self.conv1 = ConvLayer(nf, nf)
self.conv2 = ConvLayer(nf, nf)
def forward(self, x): return x + self.conv2(self.conv1(x))
###Output
_____no_output_____
###Markdown
* Class notation * `__init__` * `forward` Let's add these in between each of our `conv2` layers of that last model.
###Code
net = nn.Sequential(
conv2(1,8),
ResBlock(8),
conv2(8,16),
ResBlock(16),
conv2(16,32),
ResBlock(32),
conv2(32,16),
ResBlock(16),
conv2(16,10),
Flatten()
)
net
###Output
_____no_output_____
###Markdown
Awesome! We're building a pretty substantial model here. Let's try to make it **even simpler**. We know we call a convolutional layer before each `ResBlock` and they all have the same filters, so let's make that layer!
###Code
def conv_and_res(ni, nf): return nn.Sequential(conv2(ni, nf), ResBlock(nf))
net = nn.Sequential(
conv_and_res(1,8),
conv_and_res(8,16),
conv_and_res(16,32),
conv_and_res(32,16),
conv2(16,10),
Flatten()
)
###Output
_____no_output_____
###Markdown
And now we have something that resembles a ResNet! Let's see how it performs
###Code
learn = Learner(dls, net, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.lr_find()
###Output
_____no_output_____
###Markdown
Let's do 1e-1 again
###Code
learn.fit_one_cycle(3, lr_max=1e-1)
###Output
_____no_output_____
###Markdown
Lesson 2 - Image Classification Models from Scratch Lesson Video:
###Code
#hide_input
from IPython.lib.display import YouTubeVideo
YouTubeVideo('_SKqrTlXNt8')
#hide
#Run once per session
!pip install fastai wwf -q --upgrade
#hide_input
from wwf.utils import state_versions
state_versions(['fastai', 'fastcore', 'wwf'])
###Output
_____no_output_____
###Markdown
Grab our vision related libraries
###Code
from fastai.vision.all import *
###Output
_____no_output_____
###Markdown
Below you will find the exact imports for everything we use today
###Code
from torch import nn
from fastai.callback.hook import summary
from fastai.callback.schedule import fit_one_cycle, lr_find
from fastai.callback.progress import ProgressCallback
from fastai.data.core import Datasets, DataLoaders, show_at
from fastai.data.external import untar_data, URLs
from fastai.data.transforms import Categorize, GrandparentSplitter, parent_label, ToTensor, IntToFloatTensor, Normalize
from fastai.layers import Flatten
from fastai.learner import Learner
from fastai.metrics import accuracy, CrossEntropyLossFlat
from fastai.vision.augment import CropPad, RandomCrop, PadMode
from fastai.vision.core import PILImageBW
from fastai.vision.utils import get_image_files
###Output
_____no_output_____
###Markdown
And our data
###Code
path = untar_data(URLs.MNIST)
###Output
_____no_output_____
###Markdown
Working with the data
###Code
items = get_image_files(path)
items[0]
###Output
_____no_output_____
###Markdown
Create an image object. Done automatically with `ImageBlock`.
###Code
im = PILImageBW.create(items[0])
im.show()
###Output
_____no_output_____
###Markdown
Split our data with `GrandparentSplitter`, which will make use of a `train` and `valid` folder.
###Code
splits = GrandparentSplitter(train_name='training', valid_name='testing')
items[:3]
###Output
_____no_output_____
###Markdown
Splits need to be applied to some items
###Code
splits = splits(items)
splits[0][:5], splits[1][:5]
###Output
_____no_output_____
###Markdown
* Make a `Datasets`* Expects items, transforms for describing our problem, and a splitting method
###Code
dsrc = Datasets(items, tfms=[[PILImageBW.create], [parent_label, Categorize]],
splits=splits)
###Output
_____no_output_____
###Markdown
We can look at an item in our `Datasets` with `show_at`
###Code
show_at(dsrc.train, 3)
###Output
_____no_output_____
###Markdown
We can see that it's a `PILImage` of a three, along with a label of `3`. Next we need to give ourselves some transforms on the data! These will need to: 1. Ensure our images are all the same size, 2. Make sure our outputs are the `tensor`s our models expect, and 3. Give some image augmentation
###Code
tfms = [ToTensor(), CropPad(size=34, pad_mode=PadMode.Zeros), RandomCrop(size=28)]
###Output
_____no_output_____
###Markdown
* `ToTensor`: Converts to tensor* `CropPad` and `RandomCrop`: Resizing transforms* Applied on the `CPU` via `after_item`
###Code
gpu_tfms = [IntToFloatTensor(), Normalize()]
###Output
_____no_output_____
###Markdown
* `IntToFloatTensor`: Converts to a float* `Normalize`: Normalizes data
###Code
dls = dsrc.dataloaders(bs=128, after_item=tfms, after_batch=gpu_tfms)
###Output
_____no_output_____
###Markdown
And show a batch
###Code
dls.show_batch()
###Output
_____no_output_____
###Markdown
From here we need to see what our model will expect
###Code
xb, yb = dls.one_batch()
###Output
_____no_output_____
###Markdown
And now the shapes:
###Code
xb.shape, yb.shape
dls.c
###Output
_____no_output_____
###Markdown
So our input shape will be a [128 x 1 x 28 x 28] and our output shape will be a [128] tensor that we need to condense into 10 classes. The Model: Our models are made up of **layers**, and each layer represents a matrix multiplication to end up with our final `y`. For this image problem, we will use a **Convolutional layer**, a **Batch Normalization layer**, an **Activation Function**, and a **Flattening layer**. Convolutional Layer: These are always the first layer in our network. I will be borrowing an analogy from [here](https://adeshpande3.github.io/A-Beginner%27s-Guide-To-Understanding-Convolutional-Neural-Networks/) by Adit Deshpande. Our example Convolutional layer will be 5x5x1. Imagine a flashlight that is shining over the top left of an image, which covers a 5x5 section of pixels at one given moment. This flashlight then slides across our pixels, covering every area of the picture. This flashlight is called a **filter**, which can also be called a **neuron** or **kernel**. The region it is currently looking over is called a **receptive field**. This filter is also an array of numbers called **weights** (or **parameters**). The depth of this filter **must** be the same as the depth of our input. In our case it is 1 (in a color image this is 3). Now once this filter begins moving (or **convolving**) around the image, it multiplies the values inside the filter with the original pixel values of our image (also called **element-wise multiplications**). These are then summed up (in our case this is just one multiplication of 28x28) to an individual value, which is a representation of **just** the top left of our image. Now repeat this until every unique location has a number, and we get what is called an **activation** or **feature map**. This feature map will have 784 different locations, which turns into a 28x28 array. In our model the Convolutional layer will be 3x3 instead (kernel_size = 3) and it will move 2 pixels instead of 1 during each step (stride = 2), resulting in a 14x14 feature map (as can be seen in `learn.summary()` below). To fully understand convolution layers and their parameters, there is an excellent tutorial [here](https://arxiv.org/pdf/1603.07285.pdf).
###Code
def conv(ni, nf): return nn.Conv2d(ni, nf, kernel_size=3, stride=2, padding=1, bias=False)
###Output
_____no_output_____
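###Markdown
As a quick check of the 14x14 claim above (an added cell, not part of the original notebook), pushing a dummy MNIST-sized batch through one of these layers shows the spatial size being halved:
###Code
# minimal sketch: output size = floor((28 + 2*padding - kernel) / stride) + 1 = floor((28 + 2 - 3) / 2) + 1 = 14
conv(1, 8)(torch.randn(64, 1, 28, 28)).shape  # expected: torch.Size([64, 8, 14, 14])
###Output
_____no_output_____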
###Markdown
Here we can see our `ni` is equivalent to the depth of the filter, and `nf` is equivalent to how many filters we will be using. (Fun fact: this always has to be divisible by the size of our image.) Batch Normalization: As we send our tensors through our model, it is important to normalize our data throughout the network. Doing so can allow for a much larger improvement in training speed, along with allowing each layer to learn independently (as each layer is then re-normalized according to its outputs)
###Code
def bn(nf): return nn.BatchNorm2d(nf)
###Output
_____no_output_____
###Markdown
`nf` will be the same as the filter output from our previous convolutional layer. Activation functions: They give our models non-linearity and work with the `weights` we mentioned earlier, along with a `bias`, through a process called **back-propagation**. These allow our models to learn and perform more complex tasks because they can choose to fire or activate one of those neurons mentioned earlier. In a simple sense, let's look at the `ReLU` activation function. It operates by turning any negative values to zero, as visualized below. From "A Practical Guide to ReLU" by Danqing Liu [URL](https://medium.com/@danqing/a-practical-guide-to-relu-b83ca804f1f7).
###Code
def ReLU(): return nn.ReLU(inplace=False)
###Output
_____no_output_____
###Markdown
Flattening: The last bit we need to do is take all these activations and the resulting matrix and flatten them into a single dimension of predictions. We do this with a `Flatten()` module
###Code
Flatten??
###Output
_____no_output_____
###Markdown
Making a Model* Five convolutional layers* `nn.Sequential`* 1 -> 32 -> 10
###Code
model = nn.Sequential(
conv(1, 8),
bn(8),
ReLU(),
conv(8, 16),
bn(16),
ReLU(),
conv(16,32),
bn(32),
ReLU(),
conv(32, 16),
bn(16),
ReLU(),
conv(16, 10),
bn(10),
Flatten()
)
###Output
_____no_output_____
###Markdown
Now let's make our `Learner`
###Code
learn = Learner(dls, model, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
###Output
_____no_output_____
###Markdown
We can then also call `learn.summary` to take a look at all the sizes with their **exact** output shapes
###Code
learn.summary()
###Output
_____no_output_____
###Markdown
`learn.summary` also tells us:* Total parameters* Trainable parameters* Optimizer* Loss function* Applied `Callbacks`
###Code
learn.lr_find()
###Output
_____no_output_____
###Markdown
Let's use a learning rate around 1e-1 (0.1)
###Code
learn.fit_one_cycle(3, lr_max=1e-1)
###Output
_____no_output_____
###Markdown
Simplify it * Try to make it more like `ResNet`.* `ConvLayer` contains a `Conv2d`, `BatchNorm2d`, and an activation function
###Code
def conv2(ni, nf): return ConvLayer(ni, nf, stride=2)
###Output
_____no_output_____
###Markdown
And make a new model
###Code
net = nn.Sequential(
conv2(1,8),
conv2(8,16),
conv2(16,32),
conv2(32,16),
conv2(16,10),
Flatten()
)
###Output
_____no_output_____
###Markdown
Great! That looks much better to read! Let's make sure we get (roughly) the same results with it.
###Code
learn = Learner(dls, net, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.fit_one_cycle(3, lr_max=1e-1)
###Output
_____no_output_____
###Markdown
Almost the exact same! Perfect! Now let's get a bit more advanced. ResNet (kinda): The ResNet architecture is built with what are known as ResBlocks. Each of these blocks consists of two of the `ConvLayers` that we made before, where the number of filters does not change. Let's generate these layers.
###Code
class ResBlock(Module):
def __init__(self, nf):
self.conv1 = ConvLayer(nf, nf)
self.conv2 = ConvLayer(nf, nf)
def forward(self, x): return x + self.conv2(self.conv1(x))
###Output
_____no_output_____
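###Markdown
One detail worth checking (an added cell): because of the `x + ...` skip connection, a `ResBlock` has to return a tensor of exactly the same shape it received, which is why the number of filters cannot change inside the block:
###Code
# minimal sketch: the input and output shapes of a ResBlock match
ResBlock(8)(torch.randn(2, 8, 14, 14)).shape  # expected: torch.Size([2, 8, 14, 14])
###Output
_____no_output_____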
###Markdown
* Class notation * `__init__` * `forward` Let's add these in between each of our `conv2` layers of that last model.
###Code
net = nn.Sequential(
conv2(1,8),
ResBlock(8),
conv2(8,16),
ResBlock(16),
conv2(16,32),
ResBlock(32),
conv2(32,16),
ResBlock(16),
conv2(16,10),
Flatten()
)
net
###Output
_____no_output_____
###Markdown
Awesome! We're building a pretty substantial model here. Let's try to make it **even simpler**. We know we call a convolutional layer before each `ResBlock` and they all have the same filters, so let's make that layer!
###Code
def conv_and_res(ni, nf): return nn.Sequential(conv2(ni, nf), ResBlock(nf))
net = nn.Sequential(
conv_and_res(1,8),
conv_and_res(8,16),
conv_and_res(16,32),
conv_and_res(32,16),
conv2(16,10),
Flatten()
)
###Output
_____no_output_____
###Markdown
And now we have something that resembles a ResNet! Let's see how it performs
###Code
learn = Learner(dls, net, loss_func=CrossEntropyLossFlat(), metrics=accuracy)
learn.lr_find()
###Output
_____no_output_____
###Markdown
Let's do 1e-1 again
###Code
learn.fit_one_cycle(3, lr_max=1e-1)
###Output
_____no_output_____ |
HW05/simpsons_baseline.ipynb | ###Markdown
**Phystech School of Applied Mathematics and Informatics (FPMI), MIPT** A Journey through Springfield. Today you are going to help the FOX television network process their content. As you know, The Simpsons has been on the air for more than 25 years, and over that time a huge amount of video material has accumulated. The characters have changed along with the evolving graphics technology, and the Homer of 2018 does not look much like the Homer of 1989. Our task will be to learn to classify the characters living in Springfield. I think there is no need to introduce each of them individually. Installing the dependencies
###Code
!pip install -U torch torchvision
# install a suitable version of torch
from os.path import exists
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
cuda_output = !ldconfig -p|grep cudart.so|sed -e 's/.*\.\([0-9]*\)\.\([0-9]*\)$/cu\1\2/'
accelerator = cuda_output[0] if exists('/dev/nvidia0') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.4.1-{platform}-linux_x86_64.whl torchvision
import torch
# we will verify that GPU is enabled for this notebook
# following should print: CUDA is available! Training on GPU ...
#
# if it prints otherwise, then you need to enable GPU:
# from Menu > Runtime > Change Runtime Type > Hardware Accelerator > GPU
import torch
import numpy as np
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
# we need pillow version 5.3.0
# remove the old version and install the new one
!pip uninstall -y Pillow
!pip install Pillow==5.3.0
import PIL
print(PIL.PILLOW_VERSION)
# the version here should be 5.3.0; if it is not, restart this notebook:
# Menu > Runtime > Restart Runtime
from google.colab import drive
drive.mount('/content/gdrive/')
!unzip -q /content/gdrive/My\ Drive/simpsons/data/dataset.zip -d train
!unzip -q /content/gdrive/My\ Drive/simpsons/data/testset.zip -d test
!ls train
!nvidia-smi
import torch
torch.cuda.is_available()
###Output
Sun Dec 23 09:41:05 2018
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 396.44 Driver Version: 396.44 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla K80 Off | 00000000:00:04.0 Off | 0 |
| N/A 64C P8 31W / 149W | 11MiB / 11441MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU PID Type Process name Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Our test set will contain 990 images, for which you will need to predict the class.
###Code
import pickle
import numpy as np
from skimage import io
from tqdm import tqdm, tqdm_notebook
from PIL import Image
from pathlib import Path
from torchvision import transforms
from multiprocessing.pool import ThreadPool
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from matplotlib import colors, pyplot as plt
%matplotlib inline
# sklearn is not entirely clean here, so to keep the image output in colab readable
# we will ignore warnings
import warnings
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
# different dataset modes
DATA_MODES = ['train', 'val', 'test']
# all images will be rescaled to 224x224 px
RESCALE_SIZE = 224
# we work on the GPU
DEVICE = torch.device("cuda")
###Output
_____no_output_____
###Markdown
https://jhui.github.io/2018/02/09/PyTorch-Data-loading-preprocess_torchvision/ Below we use a wrapper around the dataset for convenient handling. It is worth understanding what happens with the LabelEncoder and with the torch transformations. ToTensor converts a PIL Image with values in the range [0, 255] (as all pixels are) into a FloatTensor of shape (C x H x W) in [0, 1], and then the scaling $input = \frac{input - \mu}{\text{standard deviation}} $ is applied, where the constants are the per-channel means and standard deviations computed on ImageNet. Note also that we override the __getitem__ method for convenient work with this data structure. A LabelEncoder is used to convert string class labels into ids and back. The dataset description states that the images come in different sizes, since they were taken directly from video, so we need to bring them to a common size (this is done by the _prepare_sample method)
###Code
class SimpsonsDataset(Dataset):
"""
A dataset of images that loads them from folders in parallel,
rescales them and converts them into torch tensors
"""
def __init__(self, files, mode):
super().__init__()
# list of files to load
self.files = sorted(files)
# operating mode
self.mode = mode
if self.mode not in DATA_MODES:
print(f"{self.mode} is not correct; correct modes: {DATA_MODES}")
raise NameError
self.len_ = len(self.files)
self.label_encoder = LabelEncoder()
if self.mode != 'test':
self.labels = [path.parent.name for path in self.files]
self.label_encoder.fit(self.labels)
with open('label_encoder.pkl', 'wb') as le_dump_file:
pickle.dump(self.label_encoder, le_dump_file)
def __len__(self):
return self.len_
def load_sample(self, file):
image = Image.open(file)
image.load()
return image
def __getitem__(self, index):
# to convert images into PyTorch tensors and normalize the input
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
x = self.load_sample(self.files[index])
x = self._prepare_sample(x)
x = np.array(x / 255, dtype='float32')
x = transform(x)
if self.mode == 'test':
return x
else:
label = self.labels[index]
label_id = self.label_encoder.transform([label])
y = label_id.item()
return x, y
def _prepare_sample(self, image):
image = image.resize((RESCALE_SIZE, RESCALE_SIZE))
return np.array(image)
def imshow(inp, title=None, plt_ax=plt, default=False):
"""Imshow для тензоров"""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt_ax.imshow(inp)
if title is not None:
plt_ax.set_title(title)
plt_ax.grid(False)
TRAIN_DIR = Path('train/dataset')
TEST_DIR = Path('test/testset')
train_val_files = sorted(list(TRAIN_DIR.rglob('*.jpg')))
test_files = sorted(list(TEST_DIR.rglob('*.jpg')))
from sklearn.model_selection import train_test_split
train_val_labels = [path.parent.name for path in train_val_files]
train_files, val_files = train_test_split(train_val_files, test_size=0.25, \
stratify=train_val_labels)
val_dataset = SimpsonsDataset(val_files, mode='val')
# uncomment if you have problem with pillow
# def register_extension(id, extension): Image.EXTENSION[extension.lower()] = id.upper()
# Image.register_extension = register_extension
# def register_extensions(id, extensions):
# for extension in extensions: register_extension(id, extension)
# Image.register_extensions = register_extensions
###Output
_____no_output_____
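###Markdown
To make the normalization formula described above concrete (an added illustration, not part of the original assignment; `dummy` and `normalize` are local names used only here), the cell below applies the same `transforms.Normalize` constants to a dummy tensor and shows one value per channel:
###Code
# minimal sketch: each channel becomes (value - mean) / std with the ImageNet constants
dummy = torch.ones(3, 2, 2) * 0.5
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
normalize(dummy)[:, 0, 0]  # (0.5 - mean) / std for each of the three channels
###Output
_____no_output_____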
###Markdown
Let's take a look at our characters inside the dataset.
###Code
fig, ax = plt.subplots(nrows=3, ncols=3,figsize=(8, 8), \
sharey=True, sharex=True)
for fig_x in ax.flatten():
random_characters = int(np.random.uniform(0,1000))
im_val, label = val_dataset[random_characters]
img_label = " ".join(map(lambda x: x.capitalize(),\
val_dataset.label_encoder.inverse_transform([label])[0].split('_')))
imshow(im_val.data.cpu(), \
title=img_label,plt_ax=fig_x)
###Output
_____no_output_____
###Markdown
Можете добавить ваши любимые сцены и классифицировать их. (веселые результаты можно кидать в чат) Построение нейросетиЗапустить данную сеть будет вашим мини-заданием на первую неделю, чтобы было проще участвовать в соревновании.Данная архитектура будет очень простой и нужна для того, чтобы установить базовое понимание и получить простенький сабмит на Kaggle*Описание слоев*:1. размерность входа: 3x224x224 2.размерности после слоя: 8x111x1113. 16x54x544. 32x26x265. 64x12x126. выход: 96x5x5
###Code
# A very simple network
class SimpleCnn(nn.Module):
def __init__(self, n_classes):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.conv5 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2)
)
self.out = nn.Linear(96 * 5 * 5, n_classes)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = x.view(x.size(0), -1)
logits = self.out(x)
return logits
def fit_epoch(model, train_loader, criterion, optimizer):
running_loss = 0.0
running_corrects = 0
processed_data = 0
for inputs, labels in train_loader:
inputs = inputs.to(DEVICE)
labels = labels.to(DEVICE)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
preds = torch.argmax(outputs, 1)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
processed_data += inputs.size(0)
train_loss = running_loss / processed_data
train_acc = running_corrects.cpu().numpy() / processed_data
return train_loss, train_acc
def eval_epoch(model, val_loader, criterion):
model.eval()
running_loss = 0.0
running_corrects = 0
processed_size = 0
for inputs, labels in val_loader:
inputs = inputs.to(DEVICE)
labels = labels.to(DEVICE)
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.argmax(outputs, 1)
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
processed_size += inputs.size(0)
val_loss = running_loss / processed_size
val_acc = running_corrects.double() / processed_size
return val_loss, val_acc
def train(train_files, val_files, model, epochs, batch_size):
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
history = []
log_template = "\nEpoch {ep:03d} train_loss: {t_loss:0.4f} \
val_loss {v_loss:0.4f} train_acc {t_acc:0.4f} val_acc {v_acc:0.4f}"
with tqdm(desc="epoch", total=epochs) as pbar_outer:
opt = torch.optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss()
for epoch in range(epochs):
train_loss, train_acc = fit_epoch(model, train_loader, criterion, opt)
print("loss", train_loss)
val_loss, val_acc = eval_epoch(model, val_loader, criterion)
history.append((train_loss, train_acc, val_loss, val_acc))
pbar_outer.update(1)
tqdm.write(log_template.format(ep=epoch+1, t_loss=train_loss,\
v_loss=val_loss, t_acc=train_acc, v_acc=val_acc))
return history
def predict(model, test_loader):
with torch.no_grad():
logits = []
for inputs in test_loader:
inputs = inputs.to(DEVICE)
model.eval()
outputs = model(inputs).cpu()
logits.append(outputs)
probs = nn.functional.softmax(torch.cat(logits), dim=-1).numpy()
return probs
n_classes = len(np.unique(train_val_labels))
simple_cnn = SimpleCnn(n_classes).to(DEVICE)
print("we will classify :{}".format(n_classes))
print(simple_cnn)
###Output
we will classify :42
SimpleCnn(
(conv1): Sequential(
(0): Conv2d(3, 8, kernel_size=(3, 3), stride=(1, 1))
(1): ReLU()
(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(conv2): Sequential(
(0): Conv2d(8, 16, kernel_size=(3, 3), stride=(1, 1))
(1): ReLU()
(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(conv3): Sequential(
(0): Conv2d(16, 32, kernel_size=(3, 3), stride=(1, 1))
(1): ReLU()
(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(conv4): Sequential(
(0): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1))
(1): ReLU()
(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(conv5): Sequential(
(0): Conv2d(64, 96, kernel_size=(3, 3), stride=(1, 1))
(1): ReLU()
(2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(out): Linear(in_features=2400, out_features=42, bias=True)
)
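###Markdown
As an optional check of the layer dimensions listed above (an added sketch, not part of the original baseline; the variable `check` is local to this cell), we can push a single dummy 3x224x224 image through a CPU copy of the network and watch the shape shrink towards 96x5x5 before the final linear layer:
###Code
# minimal sketch: trace the intermediate shapes of SimpleCnn on a dummy input
check = SimpleCnn(n_classes)
x = torch.randn(1, 3, 224, 224)
for layer in [check.conv1, check.conv2, check.conv3, check.conv4, check.conv5]:
    x = layer(x)
    print(x.shape)   # 8x111x111, 16x54x54, 32x26x26, 64x12x12, 96x5x5
print(check.out(x.view(x.size(0), -1)).shape)  # one row of n_classes logits
###Output
_____no_output_____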
###Markdown
Let's start training the network.
###Code
if val_dataset is None:
val_dataset = SimpsonsDataset(val_files, mode='val')
train_dataset = SimpsonsDataset(train_files, mode='train')
history = train(train_dataset, val_dataset, model=simple_cnn, epochs=2, batch_size=64)
###Output
epoch: 0%| | 0/2 [00:00<?, ?it/s]
###Markdown
Let's plot the learning curves
###Code
loss, acc, val_loss, val_acc = zip(*history)
plt.figure(figsize=(15, 9))
plt.plot(loss, label="train_loss")
plt.plot(val_loss, label="val_loss")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("loss")
plt.show()
###Output
_____no_output_____
###Markdown
So what do we do with all of this now? It would be good to understand how to make a submission. We have a network and its eval method, which switches the network into inference mode. Keep in mind that a softmax is applied on top of the last layer of our model (inside `predict`), which gives a vector of probabilities that an object belongs to each class. Let's make use of this.
###Code
def predict_one_sample(model, inputs, device=DEVICE):
"""Предсказание, для одной картинки"""
with torch.no_grad():
inputs = inputs.to(device)
model.eval()
logit = model(inputs).cpu()
probs = torch.nn.functional.softmax(logit, dim=-1).numpy()
return probs
random_characters = int(np.random.uniform(0,1000))
ex_img, true_label = val_dataset[random_characters]
probs_im = predict_one_sample(simple_cnn, ex_img.unsqueeze(0))
idxs = list(map(int, np.random.uniform(0,1000, 20)))
imgs = [val_dataset[id][0].unsqueeze(0) for id in idxs]
probs_ims = predict(simple_cnn, imgs)
label_encoder = pickle.load(open("label_encoder.pkl", 'rb'))
y_pred = np.argmax(probs_ims,-1)
actual_labels = [val_dataset[id][1] for id in idxs]
preds_class = [label_encoder.classes_[i] for i in y_pred]
###Output
_____no_output_____
###Markdown
Note that the metric to be optimized in the competition is the f1-score. Let's compute this target metric on the validation set.
###Code
from sklearn.metrics import f1_score
# actual_labels holds class ids, so compare them with the predicted ids (y_pred);
# 'macro' averaging is used because this is a multi-class problem
f1_score(actual_labels, y_pred, average='macro')
###Output
_____no_output_____
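###Markdown
Beyond a single f1 number, a per-class report makes it easier to spot which characters the network confuses. The cell below is an added sketch (not part of the original baseline): it reuses `simple_cnn`, `val_dataset`, `label_encoder` and `DEVICE` defined above, and introduces a helper loader `check_loader`.
###Code
# a possible way to inspect per-class precision/recall on the validation set
from sklearn.metrics import classification_report
check_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)
y_true, y_hat = [], []
simple_cnn.eval()
with torch.no_grad():
    for inputs, labels in check_loader:
        logits = simple_cnn(inputs.to(DEVICE)).cpu()
        y_hat.extend(torch.argmax(logits, dim=1).numpy())
        y_true.extend(labels.numpy())
print(classification_report(y_true, y_hat,
                            labels=np.arange(len(label_encoder.classes_)),
                            target_names=label_encoder.classes_))
###Output
_____no_output_____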
###Markdown
Let's build a nice visualization to see how confident the network is in its answers. You can use this to debug the correctness of your output.
###Code
import matplotlib.patches as patches
from matplotlib.font_manager import FontProperties
fig, ax = plt.subplots(nrows=3, ncols=3,figsize=(12, 12), \
sharey=True, sharex=True)
for fig_x in ax.flatten():
random_characters = int(np.random.uniform(0,1000))
im_val, label = val_dataset[random_characters]
img_label = " ".join(map(lambda x: x.capitalize(),\
val_dataset.label_encoder.inverse_transform([label])[0].split('_')))
imshow(im_val.data.cpu(), \
title=img_label,plt_ax=fig_x)
actual_text = "Actual : {}".format(img_label)
fig_x.add_patch(patches.Rectangle((0, 53),86,35,color='white'))
font0 = FontProperties()
font = font0.copy()
font.set_family("fantasy")
prob_pred = predict_one_sample(simple_cnn, im_val.unsqueeze(0))
predicted_proba = np.max(prob_pred)*100
y_pred = np.argmax(prob_pred)
predicted_label = label_encoder.classes_[y_pred]
predicted_label = predicted_label[:len(predicted_label)//2] + '\n' + predicted_label[len(predicted_label)//2:]
predicted_text = "{} : {:.0f}%".format(predicted_label,predicted_proba)
fig_x.text(1, 59, predicted_text , horizontalalignment='left', fontproperties=font,
verticalalignment='top',fontsize=8, color='black',fontweight='bold')
###Output
/usr/local/lib/python3.6/dist-packages/matplotlib/font_manager.py:1320: UserWarning: findfont: Font family ['fantasy'] not found. Falling back to DejaVu Sans
(prop.get_family(), self.defaultFamily[fontext]))
###Markdown
Try to find the classes that the network failed to recognize. Study this problem; it will come in handy later. Submit to Kaggle
###Code
test_dataset = SimpsonsDataset(test_files, mode="test")
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=64)
probs = predict(simple_cnn, test_loader)
preds = label_encoder.inverse_transform(np.argmax(probs, axis=1))
test_filenames = [path.name for path in test_dataset.files]
! ls
import pandas as pd
my_submit = pd.read_csv("gdrive/My Drive/simpsons/data/labels.csv")
# my_submit = pd.DataFrame({'Image_id': test_filenames, 'Expected': preds})
my_submit.head()
# TODO : make your submission (this is important; if you are struggling but made it to this cell, let us know in the chat and you will get help)
my_submit.to_csv('gdrive/My Drive/simpsons/simple_cnn_baseline.csv', index=False)
###Output
_____no_output_____ |
.ipynb_checkpoints/Lesson 2.3. PowerGrid Model API - Using SPARQL Queries-checkpoint.ipynb | ###Markdown
Lesson 2.2: Using the PowerGrid Models API with SPARQL QueriesThis tutorial focuses on how to make generic queries of the PowerGrid Models API to obtain names, mRIDs, measurements, and control setting objects modeled in CIM XML for common power system equipment used in GridAPPS-D.The lesson reviews the format used for making generic SPARQL queries and then presents an extensive catalog of cut-and-paste code blocks for the most common queries.__Learning Objectives:__At the end of the tutorial, the user should be able to use the PowerGrid Models API to* * * Getting StartedBefore running any of the sample routines in this tutorial, it is first necessary to start the GridAPPS-D Platform and establish a connection to this notebook so that we can start passing calls to the API. _Open the Ubuntu terminal and start the GridAPPS-D Platform if it is not running already:_`cd gridappsd-docker`~/gridappsd-docker$ `./run.sh -t develop`_Once containers are running,_gridappsd@[container]:/gridappsd$ `./run-gridappsd.sh`
###Code
# Establish connection to GridAPPS-D Platform:
from gridappsd import GridAPPSD
gapps = GridAPPSD("('localhost', 61613)", username='system', password='manager')
model_mrid = "_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62" # IEEE 13 Node used for all example queries
###Output
_____no_output_____
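###Markdown
With the connection established, every example in this notebook follows the same calling pattern: build a SPARQL string and pass it to `gapps.query_data`, which sends the request to the PowerGrid Models API and returns the query response. As a quick smoke test (an added sketch, not part of the original lesson; `smoke_test_query` is just an illustrative name), the query below counts how many objects of each CIM class are stored in the database:
###Code
# minimal sketch: count objects per CIM class using the same gapps.query_data pattern
smoke_test_query = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?type (COUNT(?s) as ?count) WHERE {
 ?s r:type ?rawtype.
 bind(strafter(str(?rawtype),"#") as ?type)
}
GROUP BY ?type
ORDER BY ?type
"""
gapps.query_data(smoke_test_query)
###Output
_____no_output_____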
###Markdown
--- Table of Contents* [1. Structure of Generic SPARQL Queries](1.-Structure-of-Generic-SPARQL-Queries)* [2. Making SPARQL Queries using the GridAPPSD-Python API](2.-Making-SPARQL-Queries-using-the-GridAPPSD-Python-API)* [3. Making SPARQL Queries using the STOMP Client](3.-Making-SPARQL-Queries-using-the-STOMP-Client)* [4. Making SPARQL Queries using the Blazegraph Workbench](4.-Making-SPARQL-Queries-using-the-Blazegraph-Workbench)* [5. Catalog of Common SPARQL Queries for CIM Objects](5.-Catalog-of-Common-SPARQL-Queries-for-CIM-Objects) 1. Structure of Generic SPARQL Queries: The format of SPARQL queries was discussed in detail previously in [Lesson 1.7](). 2. Making SPARQL Queries using the GridAPPSD-Python API --- 3. Making SPARQL Queries using the Blazegraph Workbench: Open the [Blazegraph Workbench](http://localhost:8889/bigdata/query) hosted on [localhost:8889/bigdata](http://localhost:8889/bigdata/query). In the query input window, it is possible to directly copy and paste queries to the Blazegraph database. --- 5. Catalog of Common SPARQL Queries for CIM Objects: The sections below present common power system equipment and measurement objects. 5.1. Queries for all feeder models and core objects: List all the feeders, with substations and regions
###Code
query = """
# list all the feeders, with substations and regions - DistFeeder
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?feeder ?fid ?station ?sid ?subregion ?sgrid ?region ?rgnid WHERE {
?s r:type c:Feeder.
?s c:IdentifiedObject.name ?feeder.
?s c:IdentifiedObject.mRID ?fid.
?s c:Feeder.NormalEnergizingSubstation ?sub.
?sub c:IdentifiedObject.name ?station.
?sub c:IdentifiedObject.mRID ?sid.
?sub c:Substation.Region ?sgr.
?sgr c:IdentifiedObject.name ?subregion.
?sgr c:IdentifiedObject.mRID ?sgrid.
?sgr c:SubGeographicalRegion.Region ?rgn.
?rgn c:IdentifiedObject.name ?region.
?rgn c:IdentifiedObject.mRID ?rgnid.
}
ORDER by ?station ?feeder
"""
# Preview API call output for the query
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
2. Get all mRID values by class and name
###Code
query = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?type ?name ?id WHERE {
?s c:IdentifiedObject.name ?name.
?s c:IdentifiedObject.mRID ?id.
?s r:type ?rawtype.
bind(strafter(str(?rawtype),"#") as ?type)
}
ORDER by ?type ?name
"""
# Preview API call output
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
1. Querying for Buses and Nodes 1.1. List the bus name and xy coordinates. Note: this query is the basis of qbus_template in InsertDER.py
###Code
query = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?bus ?seq ?locid ?x ?y WHERE { # ?cnid ?tid ?eqid
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?cn c:ConnectivityNode.ConnectivityNodeContainer ?fdr.
?trm c:Terminal.ConnectivityNode ?cn.
?trm c:ACDCTerminal.sequenceNumber ?seq.
?trm c:Terminal.ConductingEquipment ?eq.
?eq c:PowerSystemResource.Location ?loc.
?trm c:IdentifiedObject.mRID ?tid.
?cn c:IdentifiedObject.mRID ?cnid.
?cn c:IdentifiedObject.name ?bus.
?eq c:IdentifiedObject.mRID ?eqid.
?loc c:IdentifiedObject.mRID ?locid.
?pt c:PositionPoint.Location ?loc.
# caution - these next three triples make the query very slow, uncomment only if needed
# ?pt c:PositionPoint.sequenceNumber ?seq.
# ?pt c:PositionPoint.xPosition ?x.
# ?pt c:PositionPoint.yPosition ?y
}
ORDER BY ?bus ?locid
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List all the connectivity nodes by feeder
###Code
query = """
# list all the connectivity nodes by feeder
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?feeder ?name WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?s c:ConnectivityNode.ConnectivityNodeContainer ?fdr.
?s r:type c:ConnectivityNode.
?s c:IdentifiedObject.name ?name.
?fdr c:IdentifiedObject.name ?feeder.
}
ORDER by ?feeder ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List all the connectivity nodes by feeder, with voltage limits
###Code
query = '''
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?feeder ?bus ?cnid ?val ?dur ?dir WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?s c:ConnectivityNode.ConnectivityNodeContainer ?fdr.
?s r:type c:ConnectivityNode.
?s c:IdentifiedObject.name ?bus.
?s c:IdentifiedObject.mRID ?cnid.
?fdr c:IdentifiedObject.name ?feeder.
?s c:ConnectivityNode.OperationalLimitSet ?ols.
?vlim c:OperationalLimit.OperationalLimitSet ?ols.
?vlim r:type c:VoltageLimit.
?vlim c:OperationalLimit.OperationalLimitType ?olt.
?olt c:OperationalLimitType.acceptableDuration ?dur.
?olt c:OperationalLimitType.direction ?rawdir.
bind(strafter(str(?rawdir),"OperationalLimitDirectionKind.") as ?dir)
?vlim c:VoltageLimit.value ?val.
}
ORDER by ?feeder ?bus ?val
''' % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
1.2. List all the connectivity node base voltages by feeder, for sensor service
###Code
query = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT DISTINCT ?feeder ?busname ?cnid ?nomv WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?bus c:ConnectivityNode.ConnectivityNodeContainer ?fdr.
?bus r:type c:ConnectivityNode.
?bus c:IdentifiedObject.name ?busname.
?bus c:IdentifiedObject.mRID ?cnid.
?fdr c:IdentifiedObject.name ?feeder.
?trm c:Terminal.ConnectivityNode ?bus.
?trm c:Terminal.ConductingEquipment ?ce.
?ce c:ConductingEquipment.BaseVoltage ?bv.
?bv c:BaseVoltage.nominalVoltage ?nomv.
}
ORDER by ?feeder ?busname ?nomv
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List all the connectivity node base voltages by feeder, for visualization
###Code
query = """
# list all the connectivity node base voltages by feeder, for visualization
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT DISTINCT ?feeder ?busname ?nomv WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?bus c:ConnectivityNode.ConnectivityNodeContainer ?fdr.
?bus r:type c:ConnectivityNode.
?bus c:IdentifiedObject.name ?busname.
?fdr c:IdentifiedObject.name ?feeder.
?trm c:Terminal.ConnectivityNode ?bus.
?trm c:Terminal.ConductingEquipment ?ce.
?ce c:ConductingEquipment.BaseVoltage ?bv.
?bv c:BaseVoltage.nominalVoltage ?nomv.
}
ORDER by ?feeder ?busname ?nomv
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
Querying for Switching Equipment: The sample queries below present the most commonly needed queries for power system model information related to switching objects. The models currently stored in GridAPPS-D only use the _LoadBreakSwitch_ object for switching objects, so queries for other classes of switching objects are commented out in the sample code blocks below. Fuse, Breaker, Recloser, LoadBreakSwitch, Sectionaliser: This query features several different types of distribution switches available within CIM XML. The models currently stored in GridAPPS-D only use the _LoadBreakSwitch_ object for switching objects, so the other queries are commented out in this sample code block. Note: the sectionalizer object will be supported in a future release.
###Code
query = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?basev ?open ?continuous ?breaking ?fdrid (group_concat(distinct ?bus;separator="\\n") as ?buses) (group_concat(distinct ?phs;separator="\\n") as ?phases) WHERE {
# ?s r:type c:Sectionaliser.
# ?s r:type c:Disconnector.
# ?s r:type c:Fuse.
# ?s r:type c:Recloser.
# ?s r:type c:Breaker.
?s r:type c:LoadBreakSwitch.
?s c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?s c:IdentifiedObject.name ?name.
?s c:ConductingEquipment.BaseVoltage ?bv.
?bv c:BaseVoltage.nominalVoltage ?basev.
?s c:Switch.normalOpen ?open.
?s c:Switch.ratedCurrent ?continuous.
OPTIONAL {?s c:ProtectedSwitch.breakingCapacity ?breaking.}
?t c:Terminal.ConductingEquipment ?s.
?t c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
OPTIONAL {?swp c:SwitchPhase.Switch ?s.
?swp c:SwitchPhase.phaseSide1 ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
}
GROUP BY ?name ?basev ?open ?continuous ?breaking ?fdrid
ORDER BY ?name
"""
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
query = '''
# Storage - DistStorage
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?bus ?ratedS ?ratedU ?ipu ?ratedE ?storedE ?state ?p ?q ?id ?fdrid (group_concat(distinct ?phs;separator="\\n") as ?phases) WHERE {
?s r:type c:BatteryUnit.
?s c:IdentifiedObject.name ?name.
?pec c:PowerElectronicsConnection.PowerElectronicsUnit ?s.
# feeder selection options - if all commented out, query matches all feeders
#VALUES ?fdrid {"_C1C3E687-6FFD-C753-582B-632A27E28507"} # 123 bus
#VALUES ?fdrid {"_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62"} # 13 bus
#VALUES ?fdrid {"_5B816B93-7A5F-B64C-8460-47C17D6E4B0F"} # 13 bus assets
#VALUES ?fdrid {"_4F76A5F9-271D-9EB8-5E31-AA362D86F2C3"} # 8500 node
#VALUES ?fdrid {"_67AB291F-DCCD-31B7-B499-338206B9828F"} # J1
#VALUES ?fdrid {"_9CE150A8-8CC5-A0F9-B67E-BBD8C79D3095"} # R2 12.47 3
?pec c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?pec c:PowerElectronicsConnection.ratedS ?ratedS.
?pec c:PowerElectronicsConnection.ratedU ?ratedU.
?pec c:PowerElectronicsConnection.maxIFault ?ipu.
?s c:BatteryUnit.ratedE ?ratedE.
?s c:BatteryUnit.storedE ?storedE.
?s c:BatteryUnit.batteryState ?stateraw.
bind(strafter(str(?stateraw),"BatteryState.") as ?state)
?pec c:PowerElectronicsConnection.p ?p.
?pec c:PowerElectronicsConnection.q ?q.
OPTIONAL {?pecp c:PowerElectronicsConnectionPhase.PowerElectronicsConnection ?pec.
?pecp c:PowerElectronicsConnectionPhase.phase ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
bind(strafter(str(?s),"#_") as ?id).
?t c:Terminal.ConductingEquipment ?pec.
?t c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
}
GROUP by ?name ?bus ?ratedS ?ratedU ?ipu ?ratedE ?storedE ?state ?p ?q ?id ?fdrid
ORDER by ?name
'''
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
SynchronousMachine - DistSyncMachine
###Code
query = """
# SynchronousMachine - DistSyncMachine
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?bus (group_concat(distinct ?phs;separator="\\n") as ?phases) ?ratedS ?ratedU ?p ?q ?id ?fdrid WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?s r:type c:SynchronousMachine.
?s c:IdentifiedObject.name ?name.
?s c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?s c:SynchronousMachine.ratedS ?ratedS.
?s c:SynchronousMachine.ratedU ?ratedU.
?s c:SynchronousMachine.p ?p.
?s c:SynchronousMachine.q ?q.
bind(strafter(str(?s),"#_") as ?id).
OPTIONAL {?smp c:SynchronousMachinePhase.SynchronousMachine ?s.
?smp c:SynchronousMachinePhase.phase ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
?t c:Terminal.ConductingEquipment ?s.
?t c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
}
GROUP by ?name ?bus ?ratedS ?ratedU ?p ?q ?id ?fdrid
ORDER by ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
query = """
# Solar - DistSolar
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?bus ?ratedS ?ratedU ?ipu ?p ?q ?fdrid (group_concat(distinct ?phs;separator="\\n") as ?phases) WHERE {
?s r:type c:PhotovoltaicUnit.
?s c:IdentifiedObject.name ?name.
?pec c:PowerElectronicsConnection.PowerElectronicsUnit ?s.
# feeder selection options - if all commented out, query matches all feeders
#VALUES ?fdrid {"_C1C3E687-6FFD-C753-582B-632A27E28507"} # 123 bus
#VALUES ?fdrid {"_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62"} # 13 bus
#VALUES ?fdrid {"_5B816B93-7A5F-B64C-8460-47C17D6E4B0F"} # 13 bus assets
#VALUES ?fdrid {"_4F76A5F9-271D-9EB8-5E31-AA362D86F2C3"} # 8500 node
#VALUES ?fdrid {"_67AB291F-DCCD-31B7-B499-338206B9828F"} # J1
#VALUES ?fdrid {"_9CE150A8-8CC5-A0F9-B67E-BBD8C79D3095"} # R2 12.47 3
?pec c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?pec c:PowerElectronicsConnection.ratedS ?ratedS.
?pec c:PowerElectronicsConnection.ratedU ?ratedU.
?pec c:PowerElectronicsConnection.maxIFault ?ipu.
?pec c:PowerElectronicsConnection.p ?p.
?pec c:PowerElectronicsConnection.q ?q.
OPTIONAL {?pecp c:PowerElectronicsConnectionPhase.PowerElectronicsConnection ?pec.
?pecp c:PowerElectronicsConnectionPhase.phase ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
?t c:Terminal.ConductingEquipment ?pec.
?t c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus
}
GROUP by ?name ?bus ?ratedS ?ratedU ?ipu ?p ?q ?fdrid
ORDER by ?name
"""
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
query = """
# list houses - DistHouse
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?fdrname ?name ?parent ?coolingSetpoint ?coolingSystem ?floorArea ?heatingSetpoint ?heatingSystem ?hvacPowerFactor ?numberOfStories ?thermalIntegrity ?id ?fdrid
WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?h r:type c:House.
?h c:IdentifiedObject.name ?name.
?h c:IdentifiedObject.mRID ?id.
?h c:House.floorArea ?floorArea.
?h c:House.numberOfStories ?numberOfStories.
OPTIONAL{?h c:House.coolingSetpoint ?coolingSetpoint.}
OPTIONAL{?h c:House.heatingSetpoint ?heatingSetpoint.}
OPTIONAL{?h c:House.hvacPowerFactor ?hvacPowerFactor.}
?h c:House.coolingSystem ?coolingSystemRaw.
bind(strafter(str(?coolingSystemRaw),"HouseCooling.") as ?coolingSystem)
?h c:House.heatingSystem ?heatingSystemRaw.
bind(strafter(str(?heatingSystemRaw),"HouseHeating.") as ?heatingSystem)
?h c:House.thermalIntegrity ?thermalIntegrityRaw.
bind(strafter(str(?thermalIntegrityRaw),"HouseThermalIntegrity.") as ?thermalIntegrity)
?h c:House.EnergyConsumer ?econ.
?econ c:IdentifiedObject.name ?parent.
?fdr c:IdentifiedObject.mRID ?fdrid.
?fdr c:IdentifiedObject.name ?fdrname.
?econ c:Equipment.EquipmentContainer ?fdr.
}
ORDER BY ?fdrname ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
Querying for measurements List all measurements, with buses and equipments
###Code
query = """
# list all measurements, with buses and equipments
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?class ?type ?name ?bus ?phases ?eqtype ?eqname ?eqid ?trmid ?id WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?eq c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
{ ?s r:type c:Discrete. bind ("Discrete" as ?class)}
UNION
{ ?s r:type c:Analog. bind ("Analog" as ?class)}
?s c:IdentifiedObject.name ?name .
?s c:IdentifiedObject.mRID ?id .
?s c:Measurement.PowerSystemResource ?eq .
?s c:Measurement.Terminal ?trm .
?s c:Measurement.measurementType ?type .
?trm c:IdentifiedObject.mRID ?trmid.
?eq c:IdentifiedObject.mRID ?eqid.
?eq c:IdentifiedObject.name ?eqname.
?eq r:type ?typeraw.
bind(strafter(str(?typeraw),"#") as ?eqtype)
?trm c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus.
?s c:Measurement.phases ?phsraw .
{bind(strafter(str(?phsraw),"PhaseCode.") as ?phases)}
}
ORDER BY ?class ?type ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List measurement points for PowerTransformer with no tanks
###Code
query = """
# list measurement points for PowerTransformer with no tanks
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT ?name ?wnum ?bus ?eqid ?trmid WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?s c:Equipment.EquipmentContainer ?fdr.
?fdr c:IdentifiedObject.mRID ?fdrid.
?s r:type c:PowerTransformer.
?s c:IdentifiedObject.name ?name.
?s c:IdentifiedObject.mRID ?eqid.
?end c:PowerTransformerEnd.PowerTransformer ?s.
?end c:TransformerEnd.Terminal ?trm.
?end c:TransformerEnd.endNumber ?wnum.
?trm c:IdentifiedObject.mRID ?trmid.
?trm c:Terminal.ConnectivityNode ?cn.
?cn c:IdentifiedObject.name ?bus.
}
ORDER BY ?name ?wnum
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List measurement points for Breakers, Reclosers, LoadBreakSwitches in a selected feeder. This query obtains
###Code
query = """
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?cimtype ?name ?bus1 ?bus2 ?id (group_concat(distinct ?phs;separator="") as ?phases) WHERE {
SELECT ?cimtype ?name ?bus1 ?bus2 ?phs ?id WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
VALUES ?cimraw {c:LoadBreakSwitch c:Recloser c:Breaker}
?fdr c:IdentifiedObject.mRID ?fdrid.
?s r:type ?cimraw.
bind(strafter(str(?cimraw),"#") as ?cimtype)
?s c:Equipment.EquipmentContainer ?fdr.
?s c:IdentifiedObject.name ?name.
?s c:IdentifiedObject.mRID ?id.
?t1 c:Terminal.ConductingEquipment ?s.
?t1 c:ACDCTerminal.sequenceNumber "1".
?t1 c:Terminal.ConnectivityNode ?cn1.
?cn1 c:IdentifiedObject.name ?bus1.
?t2 c:Terminal.ConductingEquipment ?s.
?t2 c:ACDCTerminal.sequenceNumber "2".
?t2 c:Terminal.ConnectivityNode ?cn2.
?cn2 c:IdentifiedObject.name ?bus2
OPTIONAL {?swp c:SwitchPhase.Switch ?s.
?swp c:SwitchPhase.phaseSide1 ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
} ORDER BY ?name ?phs
}
GROUP BY ?cimtype ?name ?bus1 ?bus2 ?id
ORDER BY ?cimtype ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List measurement points for PowerElectronicsConnection with BatteryUnit in a selected feeder
###Code
query = """
# list measurement points for PowerElectronicsConnection with BatteryUnit in a selected feeder
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?uname ?bus ?id (group_concat(distinct ?phs;separator="") as ?phases) WHERE {
SELECT ?name ?uname ?bus ?phs ?id WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?s r:type c:PowerElectronicsConnection.
?s c:Equipment.EquipmentContainer ?fdr.
?s c:IdentifiedObject.name ?name.
?s c:IdentifiedObject.mRID ?id.
?peu r:type c:BatteryUnit.
?peu c:IdentifiedObject.name ?uname.
?s c:PowerElectronicsConnection.PowerElectronicsUnit ?peu.
?t1 c:Terminal.ConductingEquipment ?s.
?t1 c:ACDCTerminal.sequenceNumber "1".
?t1 c:Terminal.ConnectivityNode ?cn1.
?cn1 c:IdentifiedObject.name ?bus.
OPTIONAL {?pep c:PowerElectronicsConnectionPhase.PowerElectronicsConnection ?s.
?pep c:PowerElectronicsConnectionPhase.phase ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
} ORDER BY ?name ?phs
}
GROUP BY ?name ?uname ?bus ?id
ORDER BY ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____
###Markdown
List measurement points for ACLineSegments in a selected feeder
###Code
query = """
# list measurement points for ACLineSegments in a selected feeder
PREFIX r: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX c: <http://iec.ch/TC57/CIM100#>
SELECT ?name ?bus1 ?bus2 ?id (group_concat(distinct ?phs;separator="") as ?phases) WHERE {
SELECT ?name ?bus1 ?bus2 ?phs ?id WHERE {
VALUES ?fdrid {"%s"} # inserts model_mrid
?fdr c:IdentifiedObject.mRID ?fdrid.
?s r:type c:ACLineSegment.
?s c:Equipment.EquipmentContainer ?fdr.
?s c:IdentifiedObject.name ?name.
?s c:IdentifiedObject.mRID ?id.
?t1 c:Terminal.ConductingEquipment ?s.
?t1 c:ACDCTerminal.sequenceNumber "1".
?t1 c:Terminal.ConnectivityNode ?cn1.
?cn1 c:IdentifiedObject.name ?bus1.
?t2 c:Terminal.ConductingEquipment ?s.
?t2 c:ACDCTerminal.sequenceNumber "2".
?t2 c:Terminal.ConnectivityNode ?cn2.
?cn2 c:IdentifiedObject.name ?bus2
OPTIONAL {?acp c:ACLineSegmentPhase.ACLineSegment ?s.
?acp c:ACLineSegmentPhase.phase ?phsraw.
bind(strafter(str(?phsraw),"SinglePhaseKind.") as ?phs) }
} ORDER BY ?name ?phs
}
GROUP BY ?name ?bus1 ?bus2 ?id
ORDER BY ?name
""" % model_mrid
# Preview API call output for the query on the IEEE 13 node model
gapps.query_data(query)
###Output
_____no_output_____ |
DSC 530 - Data Exploration and Analysis/ThinkStats2/code/chap03ex.ipynb | ###Markdown
Examples and Exercises from Think Stats, 2nd Edition: http://thinkstats2.com. Copyright 2016 Allen B. Downey. MIT License: https://opensource.org/licenses/MIT
###Code
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import nsfg
import first
import thinkstats2
import thinkplot
###Output
_____no_output_____
###Markdown
Again, I'll load the NSFG pregnancy file and select live births:
###Code
preg = nsfg.ReadFemPreg()
live = preg[preg.outcome == 1]
###Output
_____no_output_____
###Markdown
Here's the histogram of birth weights:
###Code
hist = thinkstats2.Hist(live.birthwgt_lb, label='birthwgt_lb')
thinkplot.Hist(hist)
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='Count')
###Output
_____no_output_____
###Markdown
To normalize the distribution, we could divide through by the total count:
###Code
n = hist.Total()
pmf = hist.Copy()
for x, freq in hist.Items():
pmf[x] = freq / n
###Output
_____no_output_____
###Markdown
The result is a Probability Mass Function (PMF).
###Code
thinkplot.Hist(pmf)
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='PMF')
###Output
_____no_output_____
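###Markdown
As a quick sanity check (a sketch, using only the `Items` accessor already shown above), the normalized probabilities should sum to 1.
###Code
# The probabilities of a PMF sum to 1 (up to floating point error).
print(sum(p for x, p in pmf.Items()))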
###Markdown
More directly, we can create a Pmf object.
###Code
pmf = thinkstats2.Pmf([1, 2, 2, 3, 5])
pmf
###Output
_____no_output_____
###Markdown
`Pmf` provides `Prob`, which looks up a value and returns its probability:
###Code
pmf.Prob(2)
###Output
_____no_output_____
###Markdown
The bracket operator does the same thing.
###Code
pmf[2]
###Output
_____no_output_____
###Markdown
The `Incr` method adds to the probability associated with a given value.
###Code
pmf.Incr(2, 0.2)
pmf[2]
###Output
_____no_output_____
###Markdown
The `Mult` method multiplies the probability associated with a value.
###Code
pmf.Mult(2, 0.5)
pmf[2]
###Output
_____no_output_____
###Markdown
`Total` returns the total probability (which is no longer 1, because we changed one of the probabilities).
###Code
pmf.Total()
###Output
_____no_output_____
###Markdown
`Normalize` divides through by the total probability, making it 1 again.
###Code
pmf.Normalize()
pmf.Total()
###Output
_____no_output_____
###Markdown
Here's the PMF of pregnancy length for live births.
###Code
pmf = thinkstats2.Pmf(live.prglngth, label='prglngth')
###Output
_____no_output_____
###Markdown
Here's what it looks like plotted with `Hist`, which makes a bar graph.
###Code
thinkplot.Hist(pmf)
thinkplot.Config(xlabel='Pregnancy length (weeks)', ylabel='Pmf')
###Output
_____no_output_____
###Markdown
Here's what it looks like plotted with `Pmf`, which makes a step function.
###Code
thinkplot.Pmf(pmf)
thinkplot.Config(xlabel='Pregnancy length (weeks)', ylabel='Pmf')
###Output
_____no_output_____
###Markdown
We can use `MakeFrames` to return DataFrames for all live births, first babies, and others.
###Code
live, firsts, others = first.MakeFrames()
###Output
_____no_output_____
###Markdown
Here are the distributions of pregnancy length.
###Code
first_pmf = thinkstats2.Pmf(firsts.prglngth, label='firsts')
other_pmf = thinkstats2.Pmf(others.prglngth, label='others')
###Output
_____no_output_____
###Markdown
And here's the code that replicates one of the figures in the chapter.
###Code
width=0.45
axis = [27, 46, 0, 0.6]
thinkplot.PrePlot(2, cols=2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Config(xlabel='Pregnancy length(weeks)', ylabel='PMF', axis=axis)
thinkplot.PrePlot(2)
thinkplot.SubPlot(2)
thinkplot.Pmfs([first_pmf, other_pmf])
thinkplot.Config(xlabel='Pregnancy length(weeks)', axis=axis)
###Output
_____no_output_____
###Markdown
Here's the code that generates a plot of the difference in probability (in percentage points) between first babies and others, for each week of pregnancy (showing only pregnancies considered "full term").
###Code
weeks = range(35, 46)
diffs = []
for week in weeks:
p1 = first_pmf.Prob(week)
p2 = other_pmf.Prob(week)
diff = 100 * (p1 - p2)
diffs.append(diff)
thinkplot.Bar(weeks, diffs)
thinkplot.Config(xlabel='Pregnancy length(weeks)', ylabel='Difference (percentage points)')
###Output
_____no_output_____
###Markdown
Biasing and unbiasing PMFs. Here's the example in the book showing operations we can perform with `Pmf` objects. Suppose we have the following distribution of class sizes.
###Code
d = { 7: 8, 12: 8, 17: 14, 22: 4,
27: 6, 32: 12, 37: 8, 42: 3, 47: 2 }
pmf = thinkstats2.Pmf(d, label='actual')
###Output
_____no_output_____
###Markdown
This function computes the biased PMF we would get if we surveyed students and asked about the size of the classes they are in.
###Code
def BiasPmf(pmf, label):
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
###Output
_____no_output_____
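###Markdown
A tiny worked example (a sketch, not from the book's text) makes the biasing mechanism concrete: with one class of 10 students and one of 40, the actual PMF is 0.5/0.5, but weighting each size by the number of students who would report it gives 10/50 = 0.2 and 40/50 = 0.8.
###Code
# Verify the hand calculation with BiasPmf.
tiny = thinkstats2.Pmf([10, 40])
tiny_biased = BiasPmf(tiny, label='tiny biased')
print(tiny_biased[10], tiny_biased[40]) # expect 0.2 and 0.8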
###Markdown
The following graph shows the difference between the actual and observed distributions.
###Code
biased_pmf = BiasPmf(pmf, label='observed')
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased_pmf])
thinkplot.Config(xlabel='Class size', ylabel='PMF')
###Output
_____no_output_____
###Markdown
The observed mean is substantially higher than the actual.
###Code
print('Actual mean', pmf.Mean())
print('Observed mean', biased_pmf.Mean())
###Output
Actual mean 23.692307692307693
Observed mean 29.123376623376625
###Markdown
If we were only able to collect the biased sample, we could "unbias" it by applying the inverse operation.
###Code
def UnbiasPmf(pmf, label=None):
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf[x] *= 1/x
new_pmf.Normalize()
return new_pmf
###Output
_____no_output_____
###Markdown
We can unbias the biased PMF:
###Code
unbiased = UnbiasPmf(biased_pmf, label='unbiased')
print('Unbiased mean', unbiased.Mean())
###Output
Unbiased mean 23.69230769230769
###Markdown
And plot the two distributions to confirm they are the same.
###Code
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, unbiased])
thinkplot.Config(xlabel='Class size', ylabel='PMF')
###Output
_____no_output_____
###Markdown
Pandas indexing. Here's an example of a small DataFrame.
###Code
import numpy as np
import pandas
array = np.random.randn(4, 2)
df = pandas.DataFrame(array)
df
###Output
_____no_output_____
###Markdown
We can specify column names when we create the DataFrame:
###Code
columns = ['A', 'B']
df = pandas.DataFrame(array, columns=columns)
df
###Output
_____no_output_____
###Markdown
We can also specify an index that contains labels for the rows.
###Code
index = ['a', 'b', 'c', 'd']
df = pandas.DataFrame(array, columns=columns, index=index)
df
###Output
_____no_output_____
###Markdown
Normal indexing selects columns.
###Code
df['A']
###Output
_____no_output_____
###Markdown
We can use the `loc` attribute to select rows.
###Code
df.loc['a']
###Output
_____no_output_____
###Markdown
If you don't want to use the row labels and prefer to access the rows using integer indices, you can use the `iloc` attribute:
###Code
df.iloc[0]
###Output
_____no_output_____
###Markdown
`loc` can also take a list of labels.
###Code
indices = ['a', 'c']
df.loc[indices]
###Output
_____no_output_____
###Markdown
If you provide a slice of labels, `DataFrame` uses it to select rows.
###Code
df['a':'c']
###Output
_____no_output_____
###Markdown
If you provide a slice of integers, `DataFrame` selects rows by integer index.
###Code
df[0:2]
###Output
_____no_output_____
###Markdown
But notice that one method includes the last elements of the slice and one does not. In general, I recommend giving labels to the rows and names to the columns, and using them consistently. Exercises **Exercise:** Something like the class size paradox appears if you survey children and ask how many children are in their family. Families with many children are more likely to appear in your sample, and families with no children have no chance to be in the sample. Use the NSFG respondent variable `numkdhh` to construct the actual distribution for the number of children under 18 in the respondents' households. Now compute the biased distribution we would see if we surveyed the children and asked them how many children under 18 (including themselves) are in their household. Plot the actual and biased distributions, and compute their means.
###Code
resp = nsfg.ReadFemResp()
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
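###Markdown
One possible approach (a sketch, not the book's official solution) reuses `BiasPmf` from above on the `numkdhh` column named in the exercise.
###Code
# Sketch: actual vs. biased distribution of children under 18 per household.
pmf = thinkstats2.Pmf(resp.numkdhh, label='actual')
biased = BiasPmf(pmf, label='biased')
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased])
thinkplot.Config(xlabel='Number of children', ylabel='PMF')
print('Actual mean', pmf.Mean())
print('Biased mean', biased.Mean())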
###Markdown
**Exercise:** I started this book with the question, "Are first babies more likely to be late?" To address it, I computed the difference in means between groups of babies, but I ignored the possibility that there might be a difference between first babies and others for the same woman. To address this version of the question, select respondents who have at least two live births and compute pairwise differences. Does this formulation of the question yield a different result? Hint: use `nsfg.MakePregMap`:
###Code
live, firsts, others = first.MakeFrames()
preg_map = nsfg.MakePregMap(live)
# Solution goes here
# Solution goes here
# Solution goes here
###Output
_____no_output_____
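###Markdown
One possible sketch for the pairwise comparison: for each respondent with at least two live births, compare the length of the first pregnancy with the mean of the later ones, using the index lists from `preg_map`.
###Code
# Sketch: pairwise differences within the same woman.
diffs = []
for caseid, indices in preg_map.items():
    if len(indices) >= 2:
        lengths = live.loc[indices].prglngth.values
        diffs.append(lengths[0] - lengths[1:].mean())
print('Mean difference, first minus later births (weeks):', np.mean(diffs))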
###Markdown
**Exercise:** In most foot races, everyone starts at the same time. If you are a fast runner, you usually pass a lot of people at the beginning of the race, but after a few miles everyone around you is going at the same speed. When I ran a long-distance (209 miles) relay race for the first time, I noticed an odd phenomenon: when I overtook another runner, I was usually much faster, and when another runner overtook me, he was usually much faster. At first I thought that the distribution of speeds might be bimodal; that is, there were many slow runners and many fast runners, but few at my speed. Then I realized that I was the victim of a bias similar to the effect of class size. The race was unusual in two ways: it used a staggered start, so teams started at different times; also, many teams included runners at different levels of ability. As a result, runners were spread out along the course with little relationship between speed and location. When I joined the race, the runners near me were (pretty much) a random sample of the runners in the race. So where does the bias come from? During my time on the course, the chance of overtaking a runner, or being overtaken, is proportional to the difference in our speeds. I am more likely to catch a slow runner, and more likely to be caught by a fast runner. But runners at the same speed are unlikely to see each other. Write a function called `ObservedPmf` that takes a `Pmf` representing the actual distribution of runners’ speeds, and the speed of a running observer, and returns a new `Pmf` representing the distribution of runners’ speeds as seen by the observer. To test your function, you can use `relay.py`, which reads the results from the James Joyce Ramble 10K in Dedham MA and converts the pace of each runner to mph. Compute the distribution of speeds you would observe if you ran a relay race at 7 mph with this group of runners.
###Code
import relay
results = relay.ReadResults()
speeds = relay.GetSpeeds(results)
speeds = relay.BinData(speeds, 3, 12, 100)
pmf = thinkstats2.Pmf(speeds, 'actual speeds')
thinkplot.Pmf(pmf)
thinkplot.Config(xlabel='Speed (mph)', ylabel='PMF')
# Solution goes here
# Solution goes here
###Output
_____no_output_____ |
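###Markdown
A sketch of `ObservedPmf`, following the reasoning above: the chance of seeing a runner is proportional to the absolute difference between their speed and the observer's, so we reweight and renormalize just as `BiasPmf` did.
###Code
def ObservedPmf(pmf, speed, label=None):
    """Return the PMF of speeds as seen by an observer running at `speed`."""
    new_pmf = pmf.Copy(label=label)
    for x, p in pmf.Items():
        new_pmf.Mult(x, abs(x - speed))
    new_pmf.Normalize()
    return new_pmf
observed = ObservedPmf(pmf, 7, label='observed at 7 mph')
thinkplot.Pmf(observed)
thinkplot.Config(xlabel='Speed (mph)', ylabel='PMF')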
Python/61_Registration_Introduction_Continued.ipynb | ###Markdown
Introduction to SimpleITKv4 Registration - Continued. ITK v4 Registration Components. Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
%run setup_for_testing
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of HTML.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: saving a transform and corresponding resampled image, and a callback for selecting a DICOM series from several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
Callback from the dropdown which selects the two series which will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the series ID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images, either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires the fixed and moving image have the same type even though it is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
###Output
_____no_output_____
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
_____no_output_____
###Markdown
Final registration Version 1. Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
_____no_output_____
###Markdown
Version 1.1. The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.Transform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
###Output
_____no_output_____
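###Markdown
As a quick consistency check (a sketch added here, with an arbitrary voxel index chosen for illustration), the composed transform should map a fixed-image point the same way as applying the initial and then the optimized transform in sequence.
###Code
# Map one fixed-image point through the composite and through the two stages.
p_fixed = fixed_image.TransformIndexToPhysicalPoint((0, 0, 0))
print(final_transform_v11.TransformPoint(p_fixed))
print(optimized_transform.TransformPoint(initial_transform.TransformPoint(p_fixed)))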
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
_____no_output_____
###Markdown
Version 2 Multi scale - specify both scale, and how much to smooth with respect to original image. Initial transformation modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____
###Markdown
Introduction to SimpleITKv4 Registration - Continued. ITK v4 Registration Components. Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of html.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: saving a transform and corresponding resampled image, and a callback for selecting a DICOM series from several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
Callback from the dropdown which selects the two series which will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the seriesID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = reader.GetGDCMSeriesIDs(data_directory) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
reader.SetFileNames(series_file_names[selected_series_fixed])
fixed_image = reader.Execute()
reader.SetFileNames(series_file_names[selected_series_moving])
moving_image = reader.Execute()
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images, either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires the fixed and moving image have the same type even though it is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
###Output
_____no_output_____
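###Markdown
If the geometric centering is a poor starting point, the MOMENTS variant mentioned above can be tried instead; this is a sketch of that alternative, not a step used later in the notebook.
###Code
# Alternative initializer: align intensity centers of mass instead of geometric centers.
moments_initial = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image, moving_image.GetPixelID()),
                                                    moving_image,
                                                    sitk.Euler3DTransform(),
                                                    sitk.CenteredTransformInitializerFilter.MOMENTS)
print(moments_initial)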
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
_____no_output_____
###Markdown
Final registration Version 1. Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
_____no_output_____
###Markdown
Version 1.1. The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.Transform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
_____no_output_____
###Markdown
Version 2 Multi scale - specify both scale, and how much to smooth with respect to original image. Initial transformation modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
###Output
_____no_output_____
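###Markdown
The saved .tfm file can be read back later and reused for resampling without rerunning the registration; a minimal sketch, assuming the file written by save_transform_and_image above.
###Code
# Reload the transform and resample the moving image onto the fixed image grid.
reloaded = sitk.ReadTransform(os.path.join(OUTPUT_DIR, 'finalAlignment-v2.tfm'))
resampled = sitk.Resample(moving_image, fixed_image, reloaded,
                          sitk.sitkLinear, 0.0, moving_image.GetPixelID())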
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____
###Markdown
Introduction to SimpleITKv4 Registration - Continued. ITK v4 Registration Components. Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
%run setup_for_testing
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of HTML.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: saving a transform and corresponding resampled image, and a callback for selecting a DICOM series from several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
Callback from the dropdown which selects the two series which will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the series ID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images, either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires the fixed and moving image have the same type even though it is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
###Output
_____no_output_____
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
_____no_output_____
###Markdown
Final registration Version 1. Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print(f'Optimizer\'s stopping condition, {registration_method.GetOptimizerStopConditionDescription()}')
print(f'Final metric value: {registration_method.GetMetricValue()}')
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
_____no_output_____
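###Markdown
The optimized parameters themselves can also be inspected directly; a sketch, assuming the parameter layout of the underlying Euler3DTransform (three rotation angles in radians followed by three translations, with the rotation center in the fixed parameters).
###Code
# Inspect the optimized parameter vector and the fixed parameters (center of rotation).
print('Parameters:', final_transform_v1.GetParameters())
print('Fixed parameters:', final_transform_v1.GetFixedParameters())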
###Markdown
Version 1.1. The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.CompositeTransform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print(f'Optimizer\'s stopping condition, {registration_method.GetOptimizerStopConditionDescription()}')
print(f'Final metric value: {registration_method.GetMetricValue()}')
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
_____no_output_____
###Markdown
Version 2 Multi scale - specify both scale, and how much to smooth with respect to original image. Initial transformation modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print(f'Optimizer\'s stopping condition, {registration_method.GetOptimizerStopConditionDescription()}')
print(f'Final metric value: {registration_method.GetMetricValue()}')
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____
###Markdown
Introduction to SimpleITKv4 Registration - Continued. ITK v4 Registration Components. Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
%run setup_for_testing
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = "Output"
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of HTML.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: one for saving a transform and the corresponding resampled image, and a callback for selecting a DICOM series from the several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix + ".mha")
sitk.WriteTransform(transform, outputfile_prefix + ".tfm")
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
    Callback from the dropdown menus which selects the two series that will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {
"0010|0010": "Patient name: ",
"0008|0060": "Modality: ",
"0008|0021": "Series date: ",
"0008|0031": "Series time:",
"0008|0070": "Manufacturer: ",
}
html_table = []
html_table.append(
"<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>"
)
for tag in tags_to_print:
fixed_tag = ""
moving_tag = ""
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append(
"<tr><td>"
+ tags_to_print[tag]
+ "</td><td>"
+ fixed_tag
+ "</td><td>"
+ moving_tag
+ "</td></tr>"
)
html_table.append("</table>")
display(HTML("".join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ""
selected_series_moving = ""
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the series ID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) # list of all series
if series_IDs: # check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(
data_directory, series
)
interact(
DICOM_series_dropdown_callback,
fixed_image=series_IDs,
moving_image=series_IDs,
series_dictionary=fixed(series_file_names),
)
else:
print("This is surprising, data directory does not contain any DICOM series.")
# Actually read the data based on the user's selection.
fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images: either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires that the fixed and moving image have the same pixel type even though this is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(
sitk.Cast(fixed_image, moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY,
)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(
initial_transform,
fixed_image,
moving_image,
os.path.join(OUTPUT_DIR, "initialAlignment"),
)
###Output
_____no_output_____
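###Markdown
The cell above used the GEOMETRY option. As a minimal sketch (not part of the original flow), the same initializer can instead align the intensity centers of mass by selecting the MOMENTS option; the variable name moments_initial_transform is ours.
###Code
# Sketch: MOMENTS-based initialization, aligning intensity centers of mass
# instead of the geometric centers of the two volumes.
moments_initial_transform = sitk.CenteredTransformInitializer(
    sitk.Cast(fixed_image, moving_image.GetPixelID()),
    moving_image,
    sitk.Euler3DTransform(),
    sitk.CenteredTransformInitializerFilter.MOMENTS,
)
print(moments_initial_transform.GetParameters())
###Output
_____no_output_____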
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
_____no_output_____
###Markdown
Final registration Version 1 Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
# The learningRate parameter is always required. Using the default
# configuration this parameter is ignored because it is overridden
# by the default setting of the estimateLearningRate parameter which
# is sitk.ImageRegistrationMethod.Once. For the user selected
# learningRate to take effect you need to also set the
# estimateLearningRate parameter to sitk.ImageRegistrationMethod.Never
registration_method.SetOptimizerAsGradientDescent(
learningRate=1.0, numberOfIterations=100
)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(
sitk.sitkStartEvent, registration_callbacks.metric_start_plot
)
registration_method.AddCommand(
sitk.sitkEndEvent, registration_callbacks.metric_end_plot
)
registration_method.AddCommand(
sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method),
)
final_transform_v1 = registration_method.Execute(
sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)
)
print(
f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}"
)
print(f"Final metric value: {registration_method.GetMetricValue()}")
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(
final_transform_v1,
fixed_image,
moving_image,
os.path.join(OUTPUT_DIR, "finalAlignment-v1"),
)
###Output
_____no_output_____
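###Markdown
Following the comment in the cell above, here is a small sketch (our addition) showing how the user-specified learningRate can be made effective by disabling the automatic estimate; it only reconfigures the optimizer and does not rerun the registration.
###Code
# Sketch: turn off automatic learning-rate estimation so that learningRate=1.0
# is actually used (the default, estimateLearningRate=Once, overrides it).
registration_method.SetOptimizerAsGradientDescent(
    learningRate=1.0,
    numberOfIterations=100,
    estimateLearningRate=registration_method.Never,
)
###Output
_____no_output_____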
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
_____no_output_____
###Markdown
Version 1.1 The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(
learningRate=1.0, numberOfIterations=100
)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(
sitk.sitkStartEvent, registration_callbacks.metric_start_plot
)
registration_method.AddCommand(
sitk.sitkEndEvent, registration_callbacks.metric_end_plot
)
registration_method.AddCommand(
sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method),
)
registration_method.Execute(
sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)
)
# Need to compose the transformations after registration.
final_transform_v11 = sitk.CompositeTransform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print(
f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}"
)
print(f"Final metric value: {registration_method.GetMetricValue()}")
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(
final_transform_v11,
fixed_image,
moving_image,
os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"),
)
###Output
_____no_output_____
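###Markdown
A quick sanity check (our addition): according to the composition formula above, final_transform_v11 should map a point from the fixed image exactly as applying initial_transform first and then optimized_transform.
###Code
# Sketch: verify the composition order of the composite transform.
pt = fixed_image.TransformIndexToPhysicalPoint((0, 0, 0))
print(final_transform_v11.TransformPoint(pt))
print(optimized_transform.TransformPoint(initial_transform.TransformPoint(pt)))
###Output
_____no_output_____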
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
_____no_output_____
###Markdown
Version 2 Multi scale - specify both the scale (the per-level shrink factors) and how much to smooth with respect to the original image (the per-level smoothing sigmas). The initial transformation is modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(
learningRate=1.0, numberOfIterations=100
) # , estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(
sitk.sitkStartEvent, registration_callbacks.metric_start_plot
)
registration_method.AddCommand(
sitk.sitkEndEvent, registration_callbacks.metric_end_plot
)
registration_method.AddCommand(
sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations,
)
registration_method.AddCommand(
sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method),
)
registration_method.Execute(
sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)
)
print(
f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}"
)
print(f"Final metric value: {registration_method.GetMetricValue()}")
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(
final_transform,
fixed_image,
moving_image,
os.path.join(OUTPUT_DIR, "finalAlignment-v2"),
)
###Output
_____no_output_____
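###Markdown
To make the multi-resolution settings above more concrete, here is a small sketch (our addition) that prints the approximate size and spacing of the fixed image at each pyramid level, together with the smoothing sigma used at that level (in physical units, per SmoothingSigmasAreSpecifiedInPhysicalUnitsOn).
###Code
# Sketch: approximate per-level geometry for shrink factors [4, 2, 1].
for shrink, sigma in zip([4, 2, 1], [2, 1, 0]):
    level_size = [s // shrink for s in fixed_image.GetSize()]
    level_spacing = [sp * shrink for sp in fixed_image.GetSpacing()]
    print(f"shrink {shrink}: size {level_size}, spacing {level_spacing}, smoothing sigma {sigma}")
###Output
_____no_output_____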
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____
###Markdown
Introduction to SimpleITKv4 Registration - Continued [Figure: ITK v4 Registration Components] Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
%run setup_for_testing
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of HTML.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: one for saving a transform and the corresponding resampled image, and a callback for selecting a DICOM series from the several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
    Callback from the dropdown menus which selects the two series that will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the series ID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images: either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires that the fixed and moving image have the same pixel type even though this is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
###Output
_____no_output_____
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
_____no_output_____
###Markdown
Final registration Version 1 Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
_____no_output_____
###Markdown
Version 1.1 The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.CompositeTransform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
###Output
_____no_output_____
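###Markdown
As a small sketch (our addition), the composite transform written by the cell above can be read back from disk with ReadTransform; the file name matches the outputfile_prefix used above.
###Code
# Sketch: reload the saved transform; it comes back as a composite transform
# containing the optimized and initial transforms.
reloaded_transform = sitk.ReadTransform(os.path.join(OUTPUT_DIR, "finalAlignment-v1.1.tfm"))
print(reloaded_transform)
###Output
_____no_output_____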
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
_____no_output_____
###Markdown
Version 2 Multi scale - specify both the scale (the per-level shrink factors) and how much to smooth with respect to the original image (the per-level smoothing sigmas). The initial transformation is modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
###Output
_____no_output_____
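###Markdown
Because the registration above modified final_transform in place, it is still an Euler3DTransform, so its rigid parameters can be queried directly. A minimal sketch (our addition):
###Code
# Sketch: inspect the optimized rigid parameters of the in-place transform.
print(final_transform.GetTranslation())
print(final_transform.GetAngleX(), final_transform.GetAngleY(), final_transform.GetAngleZ())
###Output
_____no_output_____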
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____
###Markdown
Introduction to SimpleITKv4 Registration - Continued [Figure: ITK v4 Registration Components] Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
%run setup_for_testing
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of HTML.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: one for saving a transform and the corresponding resampled image, and a callback for selecting a DICOM series from the several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
    Callback from the dropdown menus which selects the two series that will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the series ID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
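###Markdown
When this notebook is run non-interactively the dropdown callback above never fires, so the reads at the end of the previous cell would fail. A minimal sketch (our addition, assuming the data directory contains at least two series) that picks series by index instead:
###Code
# Sketch: non-interactive selection, bypassing the ipywidgets dropdowns.
if len(series_IDs) >= 2:
    selected_series_fixed = series_IDs[0]
    selected_series_moving = series_IDs[1]
    fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
    moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
###Output
_____no_output_____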
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images: either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires that the fixed and moving image have the same pixel type even though this is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
###Output
_____no_output_____
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
itk::simple::Transform
Euler3DTransform (0x1b79f50)
RTTI typeinfo: itk::Euler3DTransform<double>
Reference Count: 1
Modified Time: 20924
Debug: Off
Object Name:
Observers:
none
Matrix:
1 0 0
0 1 0
0 0 1
Offset: [0, 0, 0]
Center: [-0.328125, -0.328125, -106.875]
Translation: [0, 0, 0]
Inverse:
1 0 0
0 1 0
0 0 1
Singular: 0
Euler's angles: AngleX=0 AngleY=0 AngleZ=0
m_ComputeZYX = 0
###Markdown
Final registration Version 1 Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
###Output
Optimizer's stopping condition, GradientDescentOptimizerv4Template: Convergence checker passed at iteration 9.
Final metric value: -1.0545418817048626
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
itk::simple::Transform
CompositeTransform (0x19009d0)
RTTI typeinfo: itk::CompositeTransform<double, 3u>
Reference Count: 1
Modified Time: 2989068
Debug: Off
Object Name:
Observers:
none
Transforms in queue, from begin to end:
>>>>>>>>>
Euler3DTransform (0x25961e0)
RTTI typeinfo: itk::Euler3DTransform<double>
Reference Count: 1
Modified Time: 2988487
Debug: Off
Object Name:
Observers:
none
Matrix:
1 -3.27282e-05 -0.000140821
3.26968e-05 1 -0.000223135
0.000140828 0.00022313 1
Offset: [0.116343, 1.74431, 0.9241]
Center: [-0.328125, -0.328125, -106.875]
Translation: [0.131404, 1.76815, 0.923985]
Inverse:
1 3.26968e-05 0.000140828
-3.27282e-05 1 0.00022313
-0.000140821 -0.000223135 1
Singular: 0
Euler's angles: AngleX=0.00022313 AngleY=-0.000140828 AngleZ=3.27282e-05
m_ComputeZYX = 0
End of MultiTransform.
<<<<<<<<<<
TransformsToOptimizeFlags, begin() to end():
1
TransformsToOptimize in queue, from begin to end:
End of TransformsToOptimizeQueue.
<<<<<<<<<<
End of CompositeTransform.
<<<<<<<<<<
###Markdown
Version 1.1 The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.CompositeTransform(optimized_transform)  # SimpleITK 2.x: compose via CompositeTransform (sitk.Transform composition is the legacy API)
final_transform_v11.AddTransform(initial_transform)
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
###Output
Optimizer's stopping condition, GradientDescentOptimizerv4Template: Convergence checker passed at iteration 9.
Final metric value: -0.8603736995320468
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
itk::simple::Transform
CompositeTransform (0x18b0d50)
RTTI typeinfo: itk::CompositeTransform<double, 3u>
Reference Count: 1
Modified Time: 5957237
Debug: Off
Object Name:
Observers:
none
Transforms in queue, from begin to end:
>>>>>>>>>
Euler3DTransform (0x1730cf0)
RTTI typeinfo: itk::Euler3DTransform<double>
Reference Count: 1
Modified Time: 5957231
Debug: Off
Object Name:
Observers:
none
Matrix:
1 -0.000433833 -0.000217996
0.000433864 1 0.000140411
0.000217935 -0.000140506 1
Offset: [-0.72222, 4.61237, 1.82938]
Center: [-0.328125, -0.328125, -106.875]
Translation: [-0.698779, 4.59722, 1.82936]
Inverse:
1 0.000433864 0.000217935
-0.000433833 1 -0.000140506
-0.000217996 0.000140411 1
Singular: 0
Euler's angles: AngleX=-0.000140506 AngleY=-0.000217935 AngleZ=0.000433833
m_ComputeZYX = 0
>>>>>>>>>
Euler3DTransform (0x1b79f50)
RTTI typeinfo: itk::Euler3DTransform<double>
Reference Count: 3
Modified Time: 20924
Debug: Off
Object Name:
Observers:
none
Matrix:
1 0 0
0 1 0
0 0 1
Offset: [0, 0, 0]
Center: [-0.328125, -0.328125, -106.875]
Translation: [0, 0, 0]
Inverse:
1 0 0
0 1 0
0 0 1
Singular: 0
Euler's angles: AngleX=0 AngleY=0 AngleZ=0
m_ComputeZYX = 0
End of MultiTransform.
<<<<<<<<<<
TransformsToOptimizeFlags, begin() to end():
0 1
TransformsToOptimize in queue, from begin to end:
End of TransformsToOptimizeQueue.
<<<<<<<<<<
End of CompositeTransform.
<<<<<<<<<<
###Markdown
Version 2 Multi scale - specify both the scale (the per-level shrink factors) and how much to smooth with respect to the original image (the per-level smoothing sigmas). The initial transformation is modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____
###Markdown
Introduction to SimpleITKv4 Registration - Continued [Figure: ITK v4 Registration Components] Before starting with this notebook, please go over the first introductory notebook found [here](60_Registration_Introduction.ipynb). In this notebook we will visually assess registration by viewing the overlap between images using external viewers. The two viewers we recommend for this task are [ITK-SNAP](http://www.itksnap.org) and [3D Slicer](http://www.slicer.org/). ITK-SNAP supports concurrent linked viewing between multiple instances of the program. 3D Slicer supports concurrent viewing of multiple volumes via alpha blending.
###Code
import SimpleITK as sitk
# If the environment variable SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT is set, this will override the ReadImage
# function so that it also resamples the image to a smaller size (testing environment is memory constrained).
%run setup_for_testing
# Utility method that either downloads data from the network or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
# GUI components (sliders, dropdown...).
from ipywidgets import interact, fixed
# Enable display of HTML.
from IPython.display import display, HTML
# Plots will be inlined.
%matplotlib inline
# Callbacks for plotting registration progress.
import registration_callbacks
###Output
_____no_output_____
###Markdown
Utility functions. A number of utility functions: one for saving a transform and the corresponding resampled image, and a callback for selecting a DICOM series from the several series found in the same directory.
###Code
def save_transform_and_image(transform, fixed_image, moving_image, outputfile_prefix):
"""
Write the given transformation to file, resample the moving_image onto the fixed_images grid and save the
result to file.
Args:
transform (SimpleITK Transform): transform that maps points from the fixed image coordinate system to the moving.
fixed_image (SimpleITK Image): resample onto the spatial grid defined by this image.
moving_image (SimpleITK Image): resample this image.
outputfile_prefix (string): transform is written to outputfile_prefix.tfm and resampled image is written to
outputfile_prefix.mha.
"""
resample = sitk.ResampleImageFilter()
resample.SetReferenceImage(fixed_image)
# SimpleITK supports several interpolation options, we go with the simplest that gives reasonable results.
resample.SetInterpolator(sitk.sitkLinear)
resample.SetTransform(transform)
sitk.WriteImage(resample.Execute(moving_image), outputfile_prefix+'.mha')
sitk.WriteTransform(transform, outputfile_prefix+'.tfm')
def DICOM_series_dropdown_callback(fixed_image, moving_image, series_dictionary):
"""
    Callback from the dropdown menus which selects the two series that will be used for registration.
The callback prints out some information about each of the series from the meta-data dictionary.
For a list of all meta-dictionary tags and their human readable names see DICOM standard part 6,
Data Dictionary (http://medical.nema.org/medical/dicom/current/output/pdf/part06.pdf)
"""
# The callback will update these global variables with the user selection.
global selected_series_fixed
global selected_series_moving
img_fixed = sitk.ReadImage(series_dictionary[fixed_image][0])
img_moving = sitk.ReadImage(series_dictionary[moving_image][0])
# There are many interesting tags in the DICOM data dictionary, display a selected few.
tags_to_print = {'0010|0010': 'Patient name: ',
'0008|0060' : 'Modality: ',
'0008|0021' : 'Series date: ',
'0008|0031' : 'Series time:',
'0008|0070' : 'Manufacturer: '}
html_table = []
html_table.append('<table><tr><td><b>Tag</b></td><td><b>Fixed Image</b></td><td><b>Moving Image</b></td></tr>')
for tag in tags_to_print:
fixed_tag = ''
moving_tag = ''
try:
fixed_tag = img_fixed.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
try:
moving_tag = img_moving.GetMetaData(tag)
except: # ignore if the tag isn't in the dictionary
pass
html_table.append('<tr><td>' + tags_to_print[tag] +
'</td><td>' + fixed_tag +
'</td><td>' + moving_tag + '</td></tr>')
html_table.append('</table>')
display(HTML(''.join(html_table)))
selected_series_fixed = fixed_image
selected_series_moving = moving_image
###Output
_____no_output_____
###Markdown
Loading Data. In this notebook we will work with CT and MR scans of the CIRS 057A multi-modality abdominal phantom. The scans are multi-slice DICOM images. The data is stored in a zip archive which is automatically retrieved and extracted when we request a file which is part of the archive.
###Code
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt"))
# 'selected_series_moving/fixed' will be updated by the interact function.
selected_series_fixed = ''
selected_series_moving = ''
# Directory contains multiple DICOM studies/series, store the file names
# in dictionary with the key being the series ID.
reader = sitk.ImageSeriesReader()
series_file_names = {}
series_IDs = list(reader.GetGDCMSeriesIDs(data_directory)) #list of all series
if series_IDs: #check that we have at least one series
for series in series_IDs:
series_file_names[series] = reader.GetGDCMSeriesFileNames(data_directory, series)
interact(DICOM_series_dropdown_callback, fixed_image=series_IDs, moving_image =series_IDs, series_dictionary=fixed(series_file_names));
else:
print('This is surprising, data directory does not contain any DICOM series.')
# Actually read the data based on the user's selection.
fixed_image = sitk.ReadImage(series_file_names[selected_series_fixed])
moving_image = sitk.ReadImage(series_file_names[selected_series_moving])
# Save images to file and view overlap using external viewer.
sitk.WriteImage(fixed_image, os.path.join(OUTPUT_DIR, "fixedImage.mha"))
sitk.WriteImage(moving_image, os.path.join(OUTPUT_DIR, "preAlignment.mha"))
###Output
_____no_output_____
###Markdown
Initial Alignment. A reasonable guesstimate for the initial translational alignment can be obtained by using the CenteredTransformInitializer (functional interface to the CenteredTransformInitializerFilter). The resulting transformation is centered with respect to the fixed image and the translation aligns the centers of the two images. There are two options for defining the centers of the images: either the physical centers of the two data sets (GEOMETRY), or the centers defined by the intensity moments (MOMENTS). Two things to note about this filter: it requires that the fixed and moving image have the same pixel type even though this is not algorithmically required, and its return type is the generic SimpleITK.Transform.
###Code
initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(fixed_image,moving_image.GetPixelID()),
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
# Save moving image after initial transform and view overlap using external viewer.
save_transform_and_image(initial_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "initialAlignment"))
###Output
_____no_output_____
###Markdown
Look at the transformation, what type is it?
###Code
print(initial_transform)
###Output
_____no_output_____
###Markdown
Final registration Version 1 Single scale (not using image pyramid). Initial transformation is not modified in place. Illustrate the need for scaling the step size differently for each parameter: SetOptimizerScalesFromIndexShift - estimated from maximum shift of voxel indexes (only use if data is isotropic). SetOptimizerScalesFromPhysicalShift - estimated from maximum shift of physical locations of voxels. SetOptimizerScalesFromJacobian - estimated from the averaged squared norm of the Jacobian w.r.t. parameters. Look at the optimizer's stopping condition to ensure we have not terminated prematurely.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
# Scale the step size differently for each parameter, this is critical!!!
registration_method.SetOptimizerScalesFromPhysicalShift()
registration_method.SetInitialTransform(initial_transform, inPlace=False)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
final_transform_v1 = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v1, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform_v1)
###Output
_____no_output_____
###Markdown
Version 1.1 The previous example illustrated the use of the ITK v4 registration framework in an ITK v3 manner. We only referred to a single transformation which was what we optimized. In ITK v4 the registration method accepts three transformations (if you look at the diagram above you will only see two transformations, Moving transform represents $T_{opt} \circ T_m$): SetInitialTransform, $T_{opt}$ - composed with the moving initial transform, maps points from the virtual image domain to the moving image domain, modified during optimization. SetFixedInitialTransform, $T_f$ - maps points from the virtual image domain to the fixed image domain, never modified. SetMovingInitialTransform, $T_m$ - maps points from the virtual image domain to the moving image domain, never modified. The transformation that maps points from the fixed to moving image domains is thus: $^M\mathbf{p} = T_{opt}(T_m(T_f^{-1}(^F\mathbf{p})))$. We now modify the previous example to use $T_{opt}$ and $T_m$.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Set the initial moving and optimized transforms.
optimized_transform = sitk.Euler3DTransform()
registration_method.SetMovingInitialTransform(initial_transform)
registration_method.SetInitialTransform(optimized_transform)
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
# Need to compose the transformations after registration.
final_transform_v11 = sitk.Transform(optimized_transform)
final_transform_v11.AddTransform(initial_transform)
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform_v11, fixed_image, moving_image, os.path.join(OUTPUT_DIR, "finalAlignment-v1.1"))
###Output
_____no_output_____
###Markdown
Look at the final transformation, what type is it? Why is it different from the previous example?
###Code
print(final_transform_v11)
###Output
_____no_output_____
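###Markdown
An optional numeric check of the composition (a sketch, not from the original notebook): apply the composite transform and both possible manual orderings to a single physical point and compare the printed coordinates. It reuses `final_transform_v11`, `optimized_transform`, `initial_transform` and `fixed_image` from the cells above; the chosen point (the centre voxel of the fixed image) is arbitrary.
###Code
# pick an arbitrary physical point - the centre voxel of the fixed image
centre_index = [s // 2 for s in fixed_image.GetSize()]
pt = fixed_image.TransformIndexToPhysicalPoint(centre_index)
print('composite transform:', final_transform_v11.TransformPoint(pt))
print('T_opt(T_m(p)):', optimized_transform.TransformPoint(initial_transform.TransformPoint(pt)))
print('T_m(T_opt(p)):', initial_transform.TransformPoint(optimized_transform.TransformPoint(pt)))
###Output
_____no_output_____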
###Markdown
Version 2 Multi-scale - specify both the scales (shrink factors) and how much to smooth with respect to the original image. The initial transformation is modified in place, so in the end we have the same type of transformation in hand.
###Code
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100) #, estimateLearningRate=registration_method.EachIteration)
registration_method.SetOptimizerScalesFromPhysicalShift()
final_transform = sitk.Euler3DTransform(initial_transform)
registration_method.SetInitialTransform(final_transform)
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
registration_method.AddCommand(sitk.sitkStartEvent, registration_callbacks.metric_start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, registration_callbacks.metric_end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent,
registration_callbacks.metric_update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent,
lambda: registration_callbacks.metric_plot_values(registration_method))
registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
# Save moving image after registration and view overlap using external viewer.
save_transform_and_image(final_transform, fixed_image, moving_image, os.path.join(OUTPUT_DIR, 'finalAlignment-v2'))
###Output
_____no_output_____
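###Markdown
To make the multi-resolution schedule above more concrete, this optional sketch shrinks the fixed image by each factor and prints the resulting size next to the smoothing sigma used at that level (the sigmas are in physical units because of `SmoothingSigmasAreSpecifiedInPhysicalUnitsOn`). It assumes `fixed_image` and the `sitk` import from earlier in the notebook.
###Code
# illustrate the shrink factor / smoothing sigma schedule used above
for shrink, sigma in zip([4, 2, 1], [2, 1, 0]):
    shrunk = sitk.Shrink(fixed_image, [shrink] * fixed_image.GetDimension())
    print('shrink factor {0}: image size {1}, smoothing sigma {2}'.format(shrink, shrunk.GetSize(), sigma))
###Output
_____no_output_____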
###Markdown
Look at the final transformation, what type is it?
###Code
print(final_transform)
###Output
_____no_output_____ |
Interns/Sarah/Cheers_Challenges_function.ipynb | ###Markdown
###Code
#import library
import random
#make list with all our names
names = ["Olivia", "Maria", "Anna-Claire", "Eliza", "Sarah", "Elise"]
def CheersChallenges(names):
#randomly selects a string within the list
name = random.choice(names)
return name
CheersChallenges(names)
###Output
_____no_output_____ |
5. Machine Learning without Sampling.ipynb | ###Markdown
Load Data
###Code
# imports assumed for this notebook; the original import cell is not shown above
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
import h2o
from h2o import H2OFrame
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_curve, auc
from catboost import Pool, CatBoostClassifier, cv
train = pd.read_csv('./data/train_clean.csv')
test = pd.read_csv('./data/test_clean.csv')
# imbalanced dataset
target1 = train['target'].sum()
target0 = (1 - train['target']).sum()
print('Train:\t', train.shape)
print('Test:\t', test.shape, '\n')
print('Target 0:\t', target0, '\t', np.round(target0 / len(train), 4))
print('Target 1:\t', target1, '\t', np.round(target1 / len(train), 4))
print('0/1 Ratio:\t', np.round(target0 / target1, 4))
# define categorical and numerical features
cat_features = ['term', 'home_ownership', 'verification_status', 'purpose',
'title', 'addr_state', 'initial_list_status', 'application_type']
num_features = ['sub_grade', 'loan_amnt', 'loan_to_inc', 'int_rate', 'installment_ratio',
'emp_length', 'annual_inc', 'dti', 'delinq_2yrs', 'inq_last_6mths',
'open_acc', 'pub_rec', 'revol_bal', 'revol_util', 'total_acc',
'collections_12_mths_ex_med', 'acc_now_delinq', 'tot_coll_amt',
'tot_cur_bal', 'total_rev_hi_lim', 'acc_open_past_24mths',
'avg_cur_bal', 'bc_open_to_buy', 'bc_util', 'chargeoff_within_12_mths',
'delinq_amnt', 'mo_sin_old_il_acct','mo_sin_old_rev_tl_op',
'mo_sin_rcnt_rev_tl_op', 'mo_sin_rcnt_tl', 'mort_acc',
'mths_since_recent_bc', 'mths_since_recent_inq', 'num_accts_ever_120_pd',
'num_actv_bc_tl', 'num_actv_rev_tl', 'num_bc_sats', 'num_bc_tl',
'num_il_tl', 'num_op_rev_tl', 'num_rev_accts', 'num_rev_tl_bal_gt_0',
'num_sats', 'num_tl_120dpd_2m', 'num_tl_30dpd', 'num_tl_90g_dpd_24m',
'num_tl_op_past_12m', 'pct_tl_nvr_dlq', 'percent_bc_gt_75',
'pub_rec_bankruptcies', 'tax_liens', 'tot_hi_cred_lim', 'credit_length',
'total_bal_ex_mort', 'total_bc_limit', 'total_il_high_credit_limit']
features = cat_features + num_features
# define numerical and categorical features
print('Categorical feature:\t', len(cat_features))
print('Numerical feature:\t', len(num_features))
print('Total feature:\t\t', len(features))
###Output
Categorical feature: 8
Numerical feature: 56
Total feature: 64
###Markdown
I. H2O Data Preparation
###Code
# initialize H2O cluster
h2o.init(nthreads=-1, max_mem_size='50G')
h2o.remove_all()
# transform to H2O Frame, and make sure the target variable is categorical
h2o_train = H2OFrame(train[features + ['target']])
h2o_test = H2OFrame(test[features])
# transform into categorical
h2o_train['target'] = h2o_train['target'].asfactor()
for name in cat_features:
h2o_train[name] = h2o_train[name].asfactor()
h2o_test[name] = h2o_test[name].asfactor()
###Output
Parse progress: |█████████████████████████████████████████████████████████| 100%
Parse progress: |█████████████████████████████████████████████████████████| 100%
###Markdown
Logistic Regression
###Code
# create GLM model with 5-fold cross-validation
glm = H2OGeneralizedLinearEstimator(family='binomial', early_stopping=True, nfolds=5,
balance_classes=False, custom_metric_func='auc',
keep_cross_validation_predictions=True, seed=42)
# train logistic regression model using grid search
hyper_parameters = {'alpha': [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
'lambda': [0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1]}
# grid search
glm_grid = H2OGridSearch(glm, hyper_parameters)
glm_grid.train(x=features, y='target', training_frame=h2o_train)
# get the grid search result, sorted by AUC decreasing
sorted_glm_grid = glm_grid.get_grid(sort_by='auc', decreasing=True)
best_alpha = sorted_glm_grid.sorted_metric_table()['alpha'][0]
best_lambda = sorted_glm_grid.sorted_metric_table()['lambda'][0]
best_glm_auc = sorted_glm_grid.sorted_metric_table()['auc'][0]
print('Best alpha:\t', best_alpha)
print('Best lambda:\t', best_lambda)
print('Best AUC:\t', best_glm_auc)
# re-build the logistic regression model with best parameters
logit = H2OGeneralizedLinearEstimator(family='binomial', balance_classes=False,
alpha=0.6, lambda_=1.0E-6, seed=42)
logit.train(x=features, y='target', training_frame=h2o_train)
# make prediction
logit_pred = logit.predict(h2o_test).as_data_frame()['p1'].values
# build the ROC curve
logit_fpr, logit_tpr, _ = roc_curve(test['target'].values, logit_pred)
logit_auc = np.round(auc(logit_fpr, logit_tpr), 4)
np.save('./result/h2o_logistic_fpr_no_sampling.npy', logit_fpr)
np.save('./result/h2o_logistic_tpr_no_sampling.npy', logit_tpr)
# visualization
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(logit_fpr, logit_tpr, label='AUC: ' + str(logit_auc))
ax.plot(logit_fpr, logit_fpr, 'k:')
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
ax.grid(True)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Random Forest Since it's too slow to run this module, the number of trees `ntrees` is set to only $100$.
###Code
# random forest grid search
hyper_parameters = {'max_depth': [15, 20, 25, 30],
'mtries': [-1, 10, 15, 20, 25],
'sample_rate': [0.5, 0.632, 0.75]}
# build random forest model
rf_model = H2ORandomForestEstimator(balance_classes=False, ntrees=100, nfolds=5,
stopping_rounds=5, stopping_metric='auc',
keep_cross_validation_predictions=True)
# define search criteria
search_criteria = {'strategy': 'RandomDiscrete', 'max_models': 10, 'seed': 42,
'stopping_metric': 'auc', 'stopping_rounds': 5}
# grid search
rf_grid = H2OGridSearch(rf_model, hyper_parameters, search_criteria=search_criteria)
rf_grid.train(x=features, y='target', training_frame=h2o_train)
# get the grid search result, sorted by AUC decreasing
sorted_rf_grid = rf_grid.get_grid(sort_by='auc', decreasing=True)
best_max_depth = sorted_rf_grid.sorted_metric_table()['max_depth'][0]
best_mtries = sorted_rf_grid.sorted_metric_table()['mtries'][0]
best_sample_rate = sorted_rf_grid.sorted_metric_table()['sample_rate'][0]
best_rf_auc = sorted_rf_grid.sorted_metric_table()['auc'][0]
print('Best max_depth:\t\t', best_max_depth)
print('Best mtries:\t\t', best_mtries)
print('Best sample_rate:\t', best_sample_rate)
print('Best AUC:\t\t', best_rf_auc)
# build random forest model with best parameters
h2o_rf = H2ORandomForestEstimator(balance_classes=False, max_depth=15, mtries=15,
sample_rate=0.75, ntrees=1000, seed=42)
h2o_rf.train(x=features, y='target', training_frame=h2o_train)
# make prediction
h2o_rf_pred = h2o_rf.predict(h2o_test).as_data_frame()['p1'].values
# get feature importance details
h2o_rf_importance = h2o_rf.varimp(use_pandas=True)
# visualization
fig, ax = plt.subplots(figsize=(10, 8))
sns.barplot(x='scaled_importance', y='variable', data=h2o_rf_importance[:15], ax=ax)
ax.set_title('H2O Random Forest Feature Importance', fontsize=16)
ax.set_xlabel('Relative Importance', fontsize=12)
ax.set_ylabel('')
ax.grid(True)
plt.tight_layout()
plt.show()
# build the ROC curve
h2o_rf_fpr, h2o_rf_tpr, _ = roc_curve(test['target'].values, h2o_rf_pred)
h2o_rf_auc = np.round(auc(h2o_rf_fpr, h2o_rf_tpr), 4)
np.save('./result/h2o_rf_fpr_no_sampling.npy', h2o_rf_fpr)
np.save('./result/h2o_rf_tpr_no_sampling.npy', h2o_rf_tpr)
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(h2o_rf_fpr, h2o_rf_tpr, label='AUC: ' + str(h2o_rf_auc))
ax.plot(h2o_rf_fpr, h2o_rf_fpr, 'k:')
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
ax.grid(True)
plt.tight_layout()
plt.show()
# Shutdown h2o instance
h2o.cluster().shutdown()
###Output
H2O session _sid_94e9 closed.
###Markdown
II. LightGBM Data Preparation
###Code
# encoding categorical data into numerical format
label_encoders = []
for name in cat_features:
encoder = LabelEncoder()
train[name] = encoder.fit_transform(train[name])
test[name] = encoder.transform(test[name])
label_encoders.append(encoder)
# create LightGBM dataset
train_x = train[features]
train_y = train['target'].values
gbm_train = lgb.Dataset(data=train_x, label=train_y, feature_name=features,
categorical_feature=cat_features, free_raw_data=False)
###Output
_____no_output_____
###Markdown
Random Forest
###Code
# define parameter space to explore
rf_num_leaves_list = [20, 30, 40, 50, 60]
rf_max_depth_list = [-1, 20, 30, 40, 50]
rf_min_data_in_leaf_list = [20, 30, 40, 50]
rf_bagging_frac_list = [0.5, 0.632, 0.7, 0.8]
rf_feature_frac_list = [0.4, 0.5, 0.6, 0.7, 0.8]
rf_num_leaves_vals = []
rf_max_depth_vals = []
rf_min_data_vals = []
rf_bagging_frac_vals = []
rf_feature_frac_vals = []
rf_mean_auc = []
rf_std_auc = []
# Random search with Cross validation
s = '|{0:^10s} |{1:^9s} |{2:^16s} |{3:^16s} |{4:^16s} |{5:^6s} |{6:^6s} |'
print(s.format('num_leaves', 'max_depth', 'min_data_in_leaf', 'bagging_fraction',
'feature_fraction', 'AUC', 'std'))
print('-' * 94)
# perform random search for given number n
n = 30
np.random.seed(42)
visited = set()
for i in range(n):
while True:
num_leaves = np.random.choice(rf_num_leaves_list)
max_depth = np.random.choice(rf_max_depth_list)
min_data_in_leaf = np.random.choice(rf_min_data_in_leaf_list)
bagging_fraction = np.random.choice(rf_bagging_frac_list)
feature_fraction = np.random.choice(rf_feature_frac_list)
tuples = (num_leaves, max_depth, min_data_in_leaf, bagging_fraction, feature_fraction)
if tuples not in visited:
visited.add(tuples)
break
params = {'objective': 'binary',
'boosting': 'rf',
'num_threads': 16,
'is_unbalance': False,
'metric': ['auc'],
'learning_rate': 0.1,
'max_bin': 255,
'num_leaves': num_leaves,
'max_depth': max_depth,
'min_data_in_leaf': min_data_in_leaf,
'bagging_fraction': bagging_fraction,
'feature_fraction': feature_fraction,
'bagging_freq': 1,
'lambda_l1': 0.0,
'lambda_l2': 0.0,
'drop_rate': 0.1,
'seed': 42}
    # 5-fold cross-validation (no early stopping)
history = lgb.cv(params, train_set=gbm_train, nfold=5, num_boost_round=1000,
stratified=True, early_stopping_rounds=None, verbose_eval=False,
seed=42, feature_name=features, categorical_feature=cat_features)
# get result
rf_num_leaves_vals.append(num_leaves)
rf_max_depth_vals.append(max_depth)
rf_min_data_vals.append(min_data_in_leaf)
rf_bagging_frac_vals.append(bagging_fraction)
rf_feature_frac_vals.append(feature_fraction)
rf_mean_auc.append(history['auc-mean'][-1])
rf_std_auc.append(history['auc-stdv'][-1])
    # output the results
ss = '|{0:10d} |{1:9d} |{2:16d} |{3:16.4f} |{4:16.4f} |{5:6.4f} |{6:6.4f} |'
print(ss.format(num_leaves, max_depth, min_data_in_leaf, bagging_fraction,
feature_fraction, history['auc-mean'][-1], history['auc-stdv'][-1]))
# get the best parameters
idx = np.argmax(rf_mean_auc)
print('-' * 94)
print(ss.format(rf_num_leaves_vals[idx], rf_max_depth_vals[idx], rf_min_data_vals[idx],
rf_bagging_frac_vals[idx], rf_feature_frac_vals[idx], rf_mean_auc[idx],
rf_std_auc[idx]))
# define best parameters
params = {'objective': 'binary',
'boosting': 'rf',
'num_threads': 16,
'is_unbalance': False,
'metric': ['auc'],
'learning_rate': 0.1,
'max_bin': 255,
'num_leaves': 60,
'max_depth': 20,
'min_data_in_leaf': 40,
'bagging_fraction': 0.5,
'feature_fraction': 0.5,
'bagging_freq': 1,
'lambda_l1': 0.0,
'lambda_l2': 0.0,
'drop_rate': 0.1,
'seed': 42}
# re-train the model and make predictions
lgb_rf = lgb.train(params, train_set=gbm_train, num_boost_round=1000,
feature_name=features, categorical_feature=cat_features)
lgb_rf_pred = lgb_rf.predict(test[features])
# get feature importance details
importance = lgb_rf.feature_importance()
lgb_rf_importance = pd.DataFrame({'feature': features, 'importance': importance},
columns=['feature', 'importance'])
lgb_rf_importance = lgb_rf_importance.sort_values(by='importance', ascending=False)
lgb_rf_importance['importance'] = lgb_rf_importance['importance'] / lgb_rf_importance['importance'].max()
# visualization
fig, ax = plt.subplots(figsize=(10, 8))
sns.barplot(x='importance', y='feature', data=lgb_rf_importance[:15], ax=ax)
ax.set_title('LightGBM Random Forest Feature Importance', fontsize=16)
ax.set_xlabel('Relative Importance', fontsize=12)
ax.set_ylabel('')
ax.grid(True)
plt.tight_layout()
plt.show()
# build the ROC curve
lgb_rf_fpr, lgb_rf_tpr, _ = roc_curve(test['target'].values, lgb_rf_pred)
lgb_rf_auc = np.round(auc(lgb_rf_fpr, lgb_rf_tpr), 4)
np.save('./result/lgb_rf_fpr_no_sampling.npy', lgb_rf_fpr)
np.save('./result/lgb_rf_tpr_no_sampling.npy', lgb_rf_tpr)
# visualization
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(lgb_rf_fpr, lgb_rf_tpr, label='AUC: ' + str(lgb_rf_auc))
ax.plot(lgb_rf_fpr, lgb_rf_fpr, 'k:')
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
ax.grid(True)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Boosting
###Code
# define parameter space to explore
gbm_learning_rate_list = [0.03, 0.05, 0.1]
gbm_num_leaves_list = [20, 30, 40, 50, 60]
gbm_max_depth_list = [-1, 10, 20, 30, 40]
gbm_min_data_in_leaf_list = [20, 30, 40, 50]
gbm_learning_rate_vals = []
gbm_num_leaves_vals = []
gbm_max_depth_vals = []
gbm_min_data_vals = []
gbm_best_rounds = []
gbm_mean_auc = []
gbm_std_auc = []
# Random search with Cross validation
s = '| {0:^3s} | {1:^10s} | {2:^10s} | {3:^16s} | {4:^12s}| {5:^6s} | {6:^6s} |'
print(s.format('learning_rate', 'num_leaves', 'max_depth', 'min_data_in_leaf',
'best_rounds', 'AUC', 'std'))
print('-' * 94)
# perform random search for given number n
n = 30
np.random.seed(42)
visited = set()
for i in range(n):
while True:
learning_rate = np.random.choice(gbm_learning_rate_list)
num_leaves = np.random.choice(gbm_num_leaves_list)
max_depth = np.random.choice(gbm_max_depth_list)
min_data_in_leaf = np.random.choice(gbm_min_data_in_leaf_list)
tuples = (learning_rate, num_leaves, max_depth, min_data_in_leaf)
if tuples not in visited:
visited.add(tuples)
break
params = {'objective': 'binary',
'boosting': 'gbdt',
'num_threads': 16,
'is_unbalance': False,
'metric': ['auc'],
'max_bin': 255,
'learning_rate': learning_rate,
'num_leaves': num_leaves,
'max_depth': max_depth,
'min_data_in_leaf': min_data_in_leaf,
'bagging_fraction': 1.0,
'feature_fraction': 1.0,
'bagging_freq': 0,
'lambda_l1': 0.0,
'lambda_l2': 0.0,
'drop_rate': 0.1,
'seed': 42}
    # 5-fold cross-validation with early stopping
history = lgb.cv(params, train_set=gbm_train, nfold=5, num_boost_round=1000,
stratified=True, early_stopping_rounds=10, verbose_eval=False,
seed=42, feature_name=features, categorical_feature=cat_features)
# get result
gbm_learning_rate_vals.append(learning_rate)
gbm_num_leaves_vals.append(num_leaves)
gbm_max_depth_vals.append(max_depth)
gbm_min_data_vals.append(min_data_in_leaf)
gbm_best_rounds.append(len(history['auc-mean']))
gbm_mean_auc.append(history['auc-mean'][-1])
gbm_std_auc.append(history['auc-stdv'][-1])
    # output the results
ss = '| {0:>13.5f} | {1:>10d} | {2:>10d} | {3:>16d} | {4:>12d}| {5:>6.4f} | {6:>6.4f} |'
print(ss.format(learning_rate, num_leaves, max_depth, min_data_in_leaf,
len(history['auc-mean']), history['auc-mean'][-1],
history['auc-stdv'][-1]))
# get the best parameters
idx = np.argmax(gbm_mean_auc)
print('-' * 94)
print(ss.format(gbm_learning_rate_vals[idx], gbm_num_leaves_vals[idx], gbm_max_depth_vals[idx],
gbm_min_data_vals[idx], gbm_best_rounds[idx], gbm_mean_auc[idx], gbm_std_auc[idx]))
# define best parameters
params = {'objective': 'binary',
'boosting': 'gbdt',
'num_threads': 4,
'is_unbalance': False,
'metric': ['auc'],
'max_bin': 255,
'learning_rate': 0.03,
'num_leaves': 60,
'max_depth': 10,
'min_data_in_leaf': 40,
'bagging_fraction': 1.0,
'feature_fraction': 1.0,
'bagging_freq': 0,
'lambda_l1': 0.0,
'lambda_l2': 0.0,
'drop_rate': 0.1,
'seed': 42}
# re-train the model and make predictions
lgb_gbm = lgb.train(params, train_set=gbm_train, num_boost_round=986,
feature_name=features, categorical_feature=cat_features)
lgb_gbm_pred = lgb_gbm.predict(test[features])
# get feature importance details
importance = lgb_gbm.feature_importance()
lgb_gbm_importance = pd.DataFrame({'feature': features, 'importance': importance},
columns=['feature', 'importance'])
lgb_gbm_importance = lgb_gbm_importance.sort_values(by='importance', ascending=False)
lgb_gbm_importance['importance'] = lgb_gbm_importance['importance'] / lgb_gbm_importance['importance'].max()
# visualization
fig, ax = plt.subplots(figsize=(10, 8))
sns.barplot(x='importance', y='feature', data=lgb_gbm_importance[:15], ax=ax)
ax.set_title('LightGBM GBM Feature Importance', fontsize=16)
ax.set_xlabel('Relative Importance', fontsize=12)
ax.set_ylabel('')
ax.grid(True)
plt.tight_layout()
plt.show()
# build the ROC curve
lgb_gbm_fpr, lgb_gbm_tpr, _ = roc_curve(test['target'].values, lgb_gbm_pred)
lgb_gbm_auc = np.round(auc(lgb_gbm_fpr, lgb_gbm_tpr), 4)
np.save('./result/lgb_gbm_fpr_no_sampling.npy', lgb_gbm_fpr)
np.save('./result/lgb_gbm_tpr_no_sampling.npy', lgb_gbm_tpr)
# visualization
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(lgb_gbm_fpr, lgb_gbm_tpr, label='AUC: ' + str(lgb_gbm_auc))
ax.plot(lgb_gbm_fpr, lgb_gbm_fpr, 'k:')
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
ax.grid(True)
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
III. CatBoost Data Preparation
###Code
# create Pool object
train_pool = Pool(data=train[features], label=train['target'].values, feature_names=features,
cat_features=np.array(range(len(cat_features))))
test_pool = Pool(data=test[features], feature_names=features,
cat_features=np.array(range(len(cat_features))))
###Output
_____no_output_____
###Markdown
Boosting
###Code
# define parameter space to explore
learning_rate_list = [0.03, 0.05, 0.08, 0.1]
depth_list = [4, 5, 6, 7, 8, 9, 10]
l2_leaf_reg_list = [1, 3, 5, 7, 9]
random_strength_list = [0.1, 0.5, 1, 2]
bagging_temperature_list = [0, 0.2, 0.4, 0.6, 0.8, 1.0]
learning_rate_values = []
depth_values = []
l2_leaf_reg_values = []
random_strength_values = []
bagging_temperature_values = []
best_iterations_values = []
train_mean_auc_values = []
test_mean_auc_values = []
# Random search with Cross validation
s = '|{0:>13s} |{1:>5s} |{2:>11s} |{3:>8s} |{4:>11s} |{5:>10s} |{6:>9s} |{7:>9s} |'
print(s.format('learning_rate', 'depth', 'l2_leaf_reg', 'strength', 'temperature',
'iterations', 'train_AUC', 'test_AUC'))
print('-' * 93)
# perform random search for given number n
n = 30
np.random.seed(42)
visited = set()
for i in range(n):
while True:
learning_rate = np.random.choice(learning_rate_list)
depth = np.random.choice(depth_list)
l2_leaf_reg = np.random.choice(l2_leaf_reg_list)
random_strength = np.random.choice(random_strength_list)
bagging_temperature = np.random.choice(bagging_temperature_list)
tuples = (learning_rate, depth, l2_leaf_reg, random_strength, bagging_temperature)
if tuples not in visited:
visited.add(tuples)
break
# define parameters
params = {'loss_function': 'Logloss',
'custom_metric': 'AUC',
'eval_metric': 'AUC',
'learning_rate': learning_rate,
'depth': depth,
'l2_leaf_reg': l2_leaf_reg,
'random_strength': random_strength,
'bagging_temperature': bagging_temperature,
'random_seed': 42,
'bootstrap_type': 'Bayesian',
'has_time': False}
scores = cv(pool=train_pool, params=params, iterations=1000, fold_count=5,
seed=42, shuffle=True, logging_level='Silent', stratified=True,
as_pandas=False, metric_period=1, early_stopping_rounds=5)
# get result
learning_rate_values.append(learning_rate)
depth_values.append(depth)
l2_leaf_reg_values.append(l2_leaf_reg)
random_strength_values.append(random_strength)
bagging_temperature_values.append(bagging_temperature)
best_idx = np.argmax(scores['test-AUC-mean'])
best_iterations = best_idx + 1
train_mean_auc = scores['train-AUC-mean'][best_idx]
test_mean_auc = scores['test-AUC-mean'][best_idx]
best_iterations_values.append(best_iterations)
train_mean_auc_values.append(train_mean_auc)
test_mean_auc_values.append(test_mean_auc)
    # output the results
ss = '|{0:>13.4f} |{1:>5d} |{2:>11d} |{3:>8.4f} |{4:>11.4f} |{5:>10d} |{6:>9.4f} |{7:>9.4f} |'
print(ss.format(learning_rate, depth, l2_leaf_reg, random_strength, bagging_temperature,
best_iterations, train_mean_auc, test_mean_auc))
# get the best parameters
idx = np.argmax(test_mean_auc_values)
print('-' * 93)
print(ss.format(learning_rate_values[idx], depth_values[idx], l2_leaf_reg_values[idx],
random_strength_values[idx], bagging_temperature_values[idx],
best_iterations_values[idx], train_mean_auc_values[idx],
test_mean_auc_values[idx]))
# build CatBoost classifier
cat_gbm = CatBoostClassifier(loss_function='Logloss', custom_metric='AUC', eval_metric='AUC',
learning_rate=0.08, depth=7, l2_leaf_reg=9, random_strength=1.0,
bagging_temperature=0.4, iterations=923, random_seed=42,
class_weights=None, bootstrap_type='Bayesian')
cat_gbm.fit(X=train_pool, eval_set=None, logging_level='Verbose', plot=False,
column_description=None, metric_period=100, early_stopping_rounds=None)
cat_gbm_pred = cat_gbm.predict_proba(data=test_pool)[:, 1]
# get feature importance details
importance = cat_gbm.get_feature_importance(data=None, prettified=True)
names = []
vals = []
# prettified importance comes back here as (name, value) pairs with byte-string names
for name, val in importance:
names.append(str(name.decode('ASCII')))
vals.append(val)
cat_gbm_importance = pd.DataFrame({'feature': names, 'importance': vals},
columns=['feature', 'importance'])
cat_gbm_importance['importance'] = cat_gbm_importance['importance'] / cat_gbm_importance['importance'].max()
# visualization
fig, ax = plt.subplots(figsize=(10, 8))
sns.barplot(x='importance', y='feature', data=cat_gbm_importance[:15], ax=ax)
ax.set_title('CatBoost GBM Feature Importance', fontsize=16)
ax.set_xlabel('Relative Importance', fontsize=12)
ax.set_ylabel('')
ax.grid(True)
plt.tight_layout()
plt.show()
# build the ROC curve
cat_gbm_fpr, cat_gbm_tpr, _ = roc_curve(test['target'].values, cat_gbm_pred)
cat_gbm_auc = np.round(auc(cat_gbm_fpr, cat_gbm_tpr), 4)
np.save('./result/cat_gbm_fpr_no_sampling.npy', cat_gbm_fpr)
np.save('./result/cat_gbm_tpr_no_sampling.npy', cat_gbm_tpr)
# visualization
fig, ax = plt.subplots(figsize=(8, 6))
ax.plot(cat_gbm_fpr, cat_gbm_tpr, label='AUC: ' + str(cat_gbm_auc))
ax.plot(cat_gbm_fpr, cat_gbm_fpr, 'k:')
ax.set_xlabel('False Positive Rate', fontsize=12)
ax.set_ylabel('True Positive Rate', fontsize=12)
ax.legend(fontsize=12)
ax.grid(True)
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
extension/examples/9582250.ipynb | ###Markdown
https://www.kaggle.com/c/20-newsgroups-ciphertext-challenge/data Reading data
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
DATA_PATH = '../input'
df_train = pd.read_csv(DATA_PATH + '/train.csv', encoding='cp1252')
df_train.shape
df_train['ciphertext_len'] = df_train['ciphertext'].apply(lambda x: len([y.encode() for y in x]))
df_train.head()
###Output
_____no_output_____
###Markdown
Vectorizing
###Code
from sklearn.feature_extraction.text import TfidfVectorizer
%%time
vect = TfidfVectorizer(lowercase=False, analyzer='char', ngram_range=(1,5), max_features=30000)
X_train_features_sparse = vect.fit_transform(df_train['ciphertext'])
X_train_features_sparse
from scipy.sparse import hstack
X_train = X_train_features_sparse.tocsr()
X_train
y_train = df_train['target']
df_test = pd.read_csv(DATA_PATH + '/test.csv', encoding='cp1252')
%%time
X_test_features_sparse = vect.transform(df_test['ciphertext'])
X_test = X_test_features_sparse.tocsr()
X_test
del(vect)
###Output
_____no_output_____
###Markdown
Splitting data by difficulty
###Code
diffs = list(range(1, 5))
from sklearn.model_selection import train_test_split
def split_idx_by_column(df, column, valid_size=None):
idxs, idxs_valid = {}, {}
for d in diffs:
idx = df.index[df[column] == d]
if valid_size is None:
idxs[d] = idx
else:
idx, idx_valid = train_test_split(idx, random_state=42,
test_size=valid_size, stratify=df['target'][idx])
idxs[d] = idx
idxs_valid[d] = idx_valid
if valid_size is None:
return idxs
else:
return idxs, idxs_valid
train_idxs = split_idx_by_column(df_train, 'difficulty')
train_part_idxs, valid_idxs = split_idx_by_column(df_train, 'difficulty', valid_size=0.1)
test_idxs = split_idx_by_column(df_test, 'difficulty')
print('train part sizes:', [z.shape[0] for z in train_part_idxs.values()])
print('valid sizes:', [z.shape[0] for z in valid_idxs.values()])
print('test sizes:', [z.shape[0] for z in test_idxs.values()])
y_valid_to_concat = []
for d in diffs:
y_valid_to_concat.append(y_train.loc[valid_idxs[d]])
y_valid = pd.concat(y_valid_to_concat)
y_valid.sort_index(inplace=True)
y_valid.index
for d in diffs:
plt.figure()
plt.title(f'Difficulty {d}')
idx = train_part_idxs[d].values
plt.hist(y_train[idx], bins=20, normed=False, alpha=0.5)
idx = valid_idxs[d].values
plt.hist(y_train[idx], bins=20, normed=False, alpha=0.5)
###Output
_____no_output_____
###Markdown
LR
###Code
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MaxAbsScaler
from sklearn.linear_model import LogisticRegression
pipes = {}
for d in diffs:
pipe = Pipeline(memory=None, steps=[
('scaler', MaxAbsScaler(copy=False)),
('clf', LogisticRegression(solver='lbfgs', multi_class='multinomial', verbose=2, n_jobs=-1))
])
pipes[d] = pipe
def train(models, X, y, diff_idxs):
for d in diffs:
idx = diff_idxs[d].values
print(f'difficulty = {d}, samples = {idx.shape[0]}')
model = models[d]
model.fit(X[idx], y.loc[idx])
return models
%%time
train(pipes, X_train, y_train, train_part_idxs)
from sklearn.metrics import confusion_matrix
def predict(models, X, diff_idxs, show_graph=True, y_truth=None):
y_preds = {}
for d in diffs:
idx = diff_idxs[d].values
model = models[d]
y_pred = model.predict(X[idx])
y_preds[d] = pd.Series(data=y_pred, index=idx)
print(f'difficulty = {d}, valid_preds = {y_preds[d].shape}')
if show_graph:
plt.figure(figsize=(12,4))
plt.subplot(121)
plt.title(f'Difficulty {d}')
plt.hist(y_pred, bins=20, normed=False, label='pred', alpha=0.5)
if y_truth is not None:
plt.hist(y_truth[idx], bins=20, label='valid', alpha=0.5)
plt.gca().set_xticks(range(20))
plt.grid()
plt.legend()
if y_truth is not None:
cm = confusion_matrix(y_truth[idx], y_pred)
plt.subplot(122)
plt.imshow(cm)
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
y_pred_to_concat = []
for d in diffs:
y_pred_to_concat.append(y_preds[d])
y_pred = pd.concat(y_pred_to_concat)
y_pred.sort_index(inplace=True)
return y_pred
y_valid_pred = predict(pipes, X_train, valid_idxs, y_truth=y_valid)
from sklearn.metrics import f1_score, precision_recall_fscore_support
f1_score(y_valid, y_valid_pred, average='macro')
precision_recall_fscore_support(y_valid, y_valid_pred, average='macro')
plt.hist(y_valid, bins=20, label='valid', alpha=0.5)
plt.hist(y_valid_pred, bins=20, label='valid_pred', alpha=0.5)
plt.gca().set_xticks(range(20))
plt.grid()
plt.legend()
pass
cm = confusion_matrix(y_valid, y_valid_pred)
plt.figure()
plt.imshow(cm)
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
###Output
_____no_output_____
###Markdown
LR tuning hyperparams
###Code
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
cv = StratifiedKFold(2)
params = {
'clf__C': np.logspace(-2, 2, 5)
}
grids = {}
for d in diffs:
pipe = pipes[d]
grid = GridSearchCV(estimator=pipe, cv=cv, param_grid=params,
scoring='f1_macro', return_train_score=True, verbose=2)
grids[d] = grid
%%time
train(grids, X_train, y_train, train_idxs)
for d in diffs:
print(f'Difficulty = {d}')
print(grids[d].cv_results_)
###Output
_____no_output_____
###Markdown
Forecasting
###Code
models = {}
for d in diffs:
model = grids[d].best_estimator_
models[d] = model
print(f'Difficulty = {d}, C={model.steps[1][1].C}')
%%time
y_test_pred = predict(models, X_test, test_idxs)
plt.hist(y_train, bins=20, label='train', alpha=0.5, density=True)
plt.hist(y_test_pred, bins=20, label='pred', alpha=0.5, density=True)
plt.gca().set_xticks(range(20))
plt.grid()
plt.legend()
pass
df_subm = pd.read_csv(DATA_PATH +'/sample_submission.csv')
df_subm['Predicted'] = y_test_pred
df_subm.head()
df_subm.to_csv('submission.csv', index=False)
###Output
_____no_output_____ |
EcoFOCI_Moorings/ERDDAP_Automated_Tools/ERDDAPTable2Grid_nc.ipynb | ###Markdown
Demo Script for ERDDAP transformations Take mooring timeseries data and grid it to 1 hr resolution so each parameter becomes parameter(time, depth) - traditionally 1 hr, 1 m for EcoFOCI. Do not interpolate in depth. Use ERDDAP as the data source. Take a CTD collection of casts and grid it? (is this useful - not really) **designed with akutan in mind**
###Code
from erddapy import ERDDAP
import pandas as pd
import numpy as np
import xarray as xa
server_url = 'http://akutan.pmel.noaa.gov:8080/erddap'
###Output
_____no_output_____
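###Markdown
Before looping over everything, a minimal synthetic sketch of the regridding pattern used below: tabular rows of (time, depth, value) are grouped by depth and resampled to hourly means, which is what turns a tabledap response into a parameter(depth, time) grid. The numbers are made up purely for illustration.
###Code
# toy illustration of the groupby/resample gridding used in the loop below
import numpy as np
import pandas as pd
times = pd.date_range('2020-01-01', periods=12, freq='10min')
toy = pd.DataFrame({'temperature': np.random.randn(24),
                    'depth': [10.0] * 12 + [20.0] * 12},
                   index=times.append(times))
toy.index.name = 'time'
toy_gridded = toy.groupby('depth').resample('1H').mean()
toy_gridded.index.names = ['depth', 'date']
# mirror the loop below; errors='ignore' in case this pandas version drops the group column itself
print(toy_gridded.drop(columns='depth', errors='ignore').to_xarray())
###Output
_____no_output_____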
###Markdown
Loop through all datasets
###Code
e = ERDDAP(server=server_url)
df = pd.read_csv(e.get_search_url(response='csv', search_for='datasets_Mooring AND final'))
print(f"{df['Dataset ID'].count()} datasets to be looped through")
print("Names:", df['Dataset ID'].values)
from requests.exceptions import HTTPError
#build datasets.xml
import os
try:
os.remove("erddap_gridded_from_tabledap.xml")
except OSError:
pass
f=open("erddap_gridded_from_tabledap.xml", "a+")
output_path = '/home/akutan/bell/in_and_outbox/erddap_generated/gridded_data_from_tabledap/'
for dataset_id in sorted(df['Dataset ID'].values):
if (not 'itae' in dataset_id) and (not 'gridded' in dataset_id) and (not 'Met' in dataset_id):
print(f'Working on {dataset_id}')
#read and import dataset
#print(dataset_id)
try:
d = ERDDAP(server=server_url,
protocol='tabledap',
response='csv'
)
d.dataset_id=dataset_id
except HTTPError:
print('Failed to generate url {}'.format(dataset_id))
try:
df_m = d.to_pandas(
index_col='time (UTC)',
parse_dates=True,
skiprows=(1,) # units information can be dropped.
)
df_m.sort_index(inplace=True)
df_m.columns = [x[1].split()[0] for x in enumerate(df_m.columns)]
#-9999 and 1e35 are missing values... but erddap doesn't catch the -9999 yet
#and some 1e35 are off a bit
df_m.loc[(df_m.depth == -9999)] = np.nan # only in pressure field
except:
print(f"something failed in data download {dataset_id}")
pass
try:
dfr = df_m.groupby('depth').resample('1H').mean()
dfr.index.names = ['depth','date']
xfr = dfr.drop('depth',axis=1).to_xarray()
xfr['date'] = xfr.date.astype('datetime64[ns]')
xfr.to_netcdf(dataset_id+'.nc',encoding={'time':{'units':'hours since 1970-01-01'}})
upperID = dataset_id.split('_')[-2].upper()
timestart = str(xfr.date.min().values).replace('00.000000000','00Z')
#write out xml file
f.write(f"""
<dataset type="EDDGridFromNcFiles" datasetID="1hr_gridded_{dataset_id}" active="true">
<reloadEveryNMinutes>10080</reloadEveryNMinutes>
<updateEveryNMillis>10000</updateEveryNMillis>
<fileDir>{output_path}</fileDir>
<fileNameRegex>{dataset_id}.nc</fileNameRegex>
<recursive>false</recursive>
<pathRegex>.*</pathRegex>
<metadataFrom>last</metadataFrom>
<matchAxisNDigits>20</matchAxisNDigits>
<fileTableInMemory>false</fileTableInMemory>
<accessibleViaFiles>false</accessibleViaFiles>
<!-- sourceAttributes>
</sourceAttributes -->
<addAttributes>
<att name="cdm_data_type">Grid</att>
<att name="Conventions">COARDS, CF-1.6, ACDD-1.3</att>
<att name="infoUrl">https://pmel.noaa.gov/</att>
<att name="institution">NOAA/PMEL - EcoFOCI</att>
<att name="keywords">gridded, active, available, chemistry, chlorophyll, Chlorophyll_Fluorescence, color, concentration, concentration_of_chlorophyll_in_sea_water, data, date, density, depth, dissolved, dissolved o2, downwelling, downwelling_photosynthetic_photon_radiance_in_sea_water, earth, Earth Science > Oceans > Ocean Chemistry > Chlorophyll, Earth Science > Oceans > Ocean Optics > Photosynthetically Active Radiation, Earth Science > Oceans > Ocean Optics > Radiance, Earth Science > Oceans > Salinity/Density > Salinity, latitude, local, longitude, number, O2, ocean, ocean color, oceans, optical, optical properties, optics, oxygen, Oxygen_Concentration, Oxygen_Saturation, PAR, photon, photosynthetic, photosynthetically, practical, pressure, properties, radiance, radiation, salinity, saturation, science, sea, sea_water_practical_salinity, seawater, serial, Serial_Number, source, statistics, temperature, water</att>
<att name="keywords_vocabulary">GCMD Science Keywords</att>
<att name="license">[standard]</att>
<att name="standard_name_vocabulary">CF Standard Name Table v55</att>
<att name="summary">QC'd (final) mooring data from {upperID}. This dataset has been gridded to 1hr resolution (with pressure left at discrete depths). It uses only datasets from the associated erddap tabular {dataset_id}. A depth field exisists for each known instrument depth, even if the parameter wasn't measured at that depth. </att>
<att name="title">Gridded Mooring Data from {upperID} - final</att>
</addAttributes>
<axisVariable>
<sourceName>depth</sourceName>
<destinationName>depth</destinationName>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Location</att>
<att name="long_name">Depth</att>
<att name="standard_name">depth</att>
<att name="units">m</att>
</addAttributes>
</axisVariable>
<axisVariable>
<sourceName>date</sourceName>
<destinationName>time</destinationName>
<!-- sourceAttributes>
<att name="calendar">proleptic_gregorian</att>
<att name="units">hours since 1970-01-01 00:00:00</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Time</att>
<att name="long_name">Date</att>
<att name="source_name">date</att>
<att name="standard_name">time</att>
<att name="units">hours since 1970-01-01 00:00:00</att>
</addAttributes>
</axisVariable>
<dataVariable>
<sourceName>latitude</sourceName>
<destinationName>latitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">90.0</att>
<att name="colorBarMinimum" type="double">-90.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Latitude</att>
<att name="standard_name">latitude</att>
<att name="units">degrees_north</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>longitude</sourceName>
<destinationName>longitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">180.0</att>
<att name="colorBarMinimum" type="double">-180.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Longitude</att>
<att name="standard_name">longitude</att>
<att name="units">degrees_east</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>temperature</sourceName>
<destinationName>temperature</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="epic_key">T_20</att>
<att name="long_name">Sea temperature in-situ ITS-90 scale</att>
<att name="standard_name">sea_water_temperature</att>
<att name="units">degree_C</att>
<att name="colorBarMaximum" type="double">20.0</att>
<att name="colorBarMinimum" type="double">-2.0</att>
<att name="ioos_category">Temperature</att>
<att name="standard_name">sea_water_temperature</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>pressure</sourceName>
<destinationName>pressure</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Sea Level</att>
<att name="long_name">Sea water pressure, equals 0 at sea-level</att>
<att name="standard_name">sea_water_pressure_due_to_sea_water</att>
<att name="units">dbar</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>salinity</sourceName>
<destinationName>salinity</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">37.0</att>
<att name="colorBarMinimum" type="double">32.0</att>
<att name="ioos_category">Salinity</att>
<att name="long_name">Sea Water Practical Salinity</att>
<att name="standard_name">sea_water_practical_salinity</att>
<att name="units">PSU</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>PAR</sourceName>
<destinationName>PAR</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">70.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Optical Properties</att>
<att name="long_name">Downwelling Photosynthetic Photon Radiance In Sea Water</att>
<att name="standard_name">downwelling_photosynthetic_photon_radiance_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Chlorophyll_Fluorescence</sourceName>
<destinationName>Chlorophyll_Fluorescence</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">30.0</att>
<att name="colorBarMinimum" type="double">0.03</att>
<att name="colorBarScale">Log</att>
<att name="ioos_category">Ocean Color</att>
<att name="long_name">Concentration Of Chlorophyll In Sea Water</att>
<att name="standard_name">concentration_of_chlorophyll_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Saturation</sourceName>
<destinationName>Oxygen_Saturation</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">percent</att>
<att name="standard_name">oxygen_saturation_over_air</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Concentration</sourceName>
<destinationName>Oxygen_Concentration</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">µmole/kg</att>
<att name="standard_name">volume_oxygen_in_solution_volume_of_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Serial_Number</sourceName>
<destinationName>Serial_Number</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Statistics</att>
<att name="long_name">Serial Number</att>
</addAttributes>
</dataVariable>
</dataset>
""")
except:
print('Something failed in gridding this set')
print("Done")
###Output
Working on datasets_Mooring_00bs2a_final
Working on datasets_Mooring_00bs2w_final
Working on datasets_Mooring_00bs3w_final
Working on datasets_Mooring_00bs4s_final
Working on datasets_Mooring_00bs4w_final
Working on datasets_Mooring_00bs6a_final
Working on datasets_Mooring_00bsm2a_final
Working on datasets_Mooring_00bsp2s_final
Working on datasets_Mooring_00bsp3s_final
Working on datasets_Mooring_00bsp4s_final
Working on datasets_Mooring_00cb1a_final
Working on datasets_Mooring_00kc1a_final
Working on datasets_Mooring_00kc2a_final
Working on datasets_Mooring_00pa1a_final
Working on datasets_Mooring_01bs2c_final
Working on datasets_Mooring_01bs4a_final
Working on datasets_Mooring_01bs4b_final
Working on datasets_Mooring_01bsm2a_final
Working on datasets_Mooring_01bsp2b_final
Working on datasets_Mooring_01bsp2s_final
Working on datasets_Mooring_01gbm3a_final
Working on datasets_Mooring_01gbm3b_final
Working on datasets_Mooring_02bs2c_final
Working on datasets_Mooring_02bs4a_final
Working on datasets_Mooring_02bs4b_final
Working on datasets_Mooring_02bsm2a_final
Working on datasets_Mooring_02bsp2a_final
Working on datasets_Mooring_02bsp2b_final
Working on datasets_Mooring_02bsp2c_final
Working on datasets_Mooring_03amp1a_final
Working on datasets_Mooring_03amp1b_final
Working on datasets_Mooring_03amp2a_final
Working on datasets_Mooring_03amp2b_final
Working on datasets_Mooring_03amp3a_final
Working on datasets_Mooring_03amp3b_final
Working on datasets_Mooring_03amp4a_final
Working on datasets_Mooring_03amp4b_final
Working on datasets_Mooring_03bs2a_final
Working on datasets_Mooring_03bs2c_final
Working on datasets_Mooring_03bs4a_final
Working on datasets_Mooring_03bs4b_final
Working on datasets_Mooring_03bsm2a_final
Working on datasets_Mooring_03bsp2a_final
Working on datasets_Mooring_03bsp2b_final
Working on datasets_Mooring_03bsp2c_final
Working on datasets_Mooring_03bsp6a_final
Working on datasets_Mooring_03cb1a_final
Working on datasets_Mooring_03cb1b_final
Working on datasets_Mooring_03gb1a_final
Working on datasets_Mooring_03gb1b_final
Working on datasets_Mooring_03gb2a_final
Working on datasets_Mooring_03gb2b_final
Working on datasets_Mooring_03gb4a_final
Working on datasets_Mooring_03gb4b_final
Something failed in gridding this set
Working on datasets_Mooring_03gbm3a_final
Working on datasets_Mooring_03gbm3b_final
Working on datasets_Mooring_03gbp12a_final
Working on datasets_Mooring_03gbp12b_final
Working on datasets_Mooring_03gbp3a_final
Working on datasets_Mooring_03gbp3b_final
Working on datasets_Mooring_03gbp5a_final
Working on datasets_Mooring_03gbp5b_final
Working on datasets_Mooring_03gp32a_final
Working on datasets_Mooring_03gp32b_final
Working on datasets_Mooring_03gp34a_final
Working on datasets_Mooring_03gp34b_final
Working on datasets_Mooring_03gpp36a_final
Working on datasets_Mooring_03gpp36b_final
Working on datasets_Mooring_03gsp6a_final
Working on datasets_Mooring_03gsp7a_final
Working on datasets_Mooring_03gsp8a_final
Working on datasets_Mooring_03kc1a_final
Working on datasets_Mooring_03kc2a_final
Working on datasets_Mooring_03pa1a_final
Working on datasets_Mooring_03sg2a_final
Something failed in gridding this set
Working on datasets_Mooring_03sg3a_final
Working on datasets_Mooring_03sg3b_final
Working on datasets_Mooring_03sg5a_final
Working on datasets_Mooring_03sg5b_final
Working on datasets_Mooring_03sgp1a_final
Working on datasets_Mooring_03sgp1b_final
Working on datasets_Mooring_03ssp1a_final
Working on datasets_Mooring_03ssp1b_final
Working on datasets_Mooring_03ssp2a_final
Working on datasets_Mooring_03ssp2b_final
Working on datasets_Mooring_03ssp3a_final
Working on datasets_Mooring_03ssp3b_final
Working on datasets_Mooring_04bs2c_final
Working on datasets_Mooring_04bs4a_final
Working on datasets_Mooring_04bs4b_final
Working on datasets_Mooring_04bsm2a_final
Working on datasets_Mooring_04bsp2a_final
Working on datasets_Mooring_04bsp2b_final
Working on datasets_Mooring_04bsp5a_final
Working on datasets_Mooring_04cb1a_final
Working on datasets_Mooring_04gb1a_final
Working on datasets_Mooring_04gbi2a_final
Working on datasets_Mooring_04gbm3a_final
Working on datasets_Mooring_04gbp3a_final
Working on datasets_Mooring_04gbp5a_final
Working on datasets_Mooring_04gbt1a_final
Working on datasets_Mooring_04gbt2a_final
Working on datasets_Mooring_04gbt4a_final
Working on datasets_Mooring_04gp32a_final
Working on datasets_Mooring_04gp34a_final
Working on datasets_Mooring_04gpp36a_final
Working on datasets_Mooring_04kc2a_final
Working on datasets_Mooring_04pa1a_final
Working on datasets_Mooring_04pi7a_final
Working on datasets_Mooring_04pi8a_final
Working on datasets_Mooring_04pip1a_final
Working on datasets_Mooring_04pip2a_final
Working on datasets_Mooring_04pip3a_final
Working on datasets_Mooring_04pip4a_final
Working on datasets_Mooring_04pip5a_final
Working on datasets_Mooring_04pip6a_final
Working on datasets_Mooring_04ssp2a_final
Working on datasets_Mooring_04ssp3a_final
Working on datasets_Mooring_04stl1a_final
Working on datasets_Mooring_04stl1b_final
Working on datasets_Mooring_05amp1a_final
Working on datasets_Mooring_05amp2a_final
Working on datasets_Mooring_05amp3a_final
Working on datasets_Mooring_05amp4a_final
Working on datasets_Mooring_05bs2c_final
Working on datasets_Mooring_05bs4a_final
Working on datasets_Mooring_05bs4b_final
Working on datasets_Mooring_05bs5a_final
Working on datasets_Mooring_05bs5b_final
Working on datasets_Mooring_05bs8a_final
Working on datasets_Mooring_05bs8b_final
Working on datasets_Mooring_05bsm2a_final
Working on datasets_Mooring_05bsp2a_final
Working on datasets_Mooring_05bsp2b_final
Working on datasets_Mooring_05bsp5b_final
Working on datasets_Mooring_05bsp8a_final
Working on datasets_Mooring_05bsp8b_final
Working on datasets_Mooring_05cb1a_final
Working on datasets_Mooring_05csp1a_final
Working on datasets_Mooring_05gbi1a_final
Working on datasets_Mooring_05kc1a_final
Working on datasets_Mooring_05kc2a_final
Working on datasets_Mooring_05pa1a_final
Working on datasets_Mooring_05ssp1a_final
Working on datasets_Mooring_05ssp2a_final
Working on datasets_Mooring_05ssp3a_final
Working on datasets_Mooring_06amp1a_final
Working on datasets_Mooring_06amp2a_final
Working on datasets_Mooring_06amp3a_final
Working on datasets_Mooring_06amp4a_final
Working on datasets_Mooring_06bs2c_final
Working on datasets_Mooring_06bs4a_final
Working on datasets_Mooring_06bs4b_final
Working on datasets_Mooring_06bs5a_final
Working on datasets_Mooring_06bs5b_final
Working on datasets_Mooring_06bs8a_final
Working on datasets_Mooring_06bsm2a_final
Working on datasets_Mooring_06bsp2a_final
Working on datasets_Mooring_06bsp2d_final
Working on datasets_Mooring_06bsp4a_final
Working on datasets_Mooring_06bsp5b_final
Working on datasets_Mooring_06bsp8a_final
Working on datasets_Mooring_06bst2a_final
Working on datasets_Mooring_06cb1a_final
Working on datasets_Mooring_06chp1a_final
Working on datasets_Mooring_06kc1a_final
Working on datasets_Mooring_06kc2a_final
Working on datasets_Mooring_06pa1a_final
Working on datasets_Mooring_06ssp1a_final
Working on datasets_Mooring_06ssp2a_final
Working on datasets_Mooring_06ssp3a_final
Working on datasets_Mooring_07amp2a_final
Working on datasets_Mooring_07amp3a_final
Working on datasets_Mooring_07amp4a_final
Working on datasets_Mooring_07bs2c_final
Working on datasets_Mooring_07bs4a_final
Working on datasets_Mooring_07bs4b_final
Working on datasets_Mooring_07bs5a_final
Working on datasets_Mooring_07bs5b_final
Working on datasets_Mooring_07bs8a_final
Working on datasets_Mooring_07bsm2a_final
Working on datasets_Mooring_07bsp4a_final
Working on datasets_Mooring_07bsp4b_final
Working on datasets_Mooring_07bsp5a_final
Working on datasets_Mooring_07bsp5b_final
Working on datasets_Mooring_07bsp8a_final
Working on datasets_Mooring_07bst2a_final
Working on datasets_Mooring_07cb1a_final
Working on datasets_Mooring_07kc2a_final
Working on datasets_Mooring_07pa1a_final
Working on datasets_Mooring_08amp1a_final
Working on datasets_Mooring_08amp2a_final
Working on datasets_Mooring_08amp3a_final
Working on datasets_Mooring_08amp4a_final
Working on datasets_Mooring_08bs2c_final
Working on datasets_Mooring_08bs4a_final
Working on datasets_Mooring_08bs4b_final
Working on datasets_Mooring_08bs5b_final
Working on datasets_Mooring_08bs8a_final
Working on datasets_Mooring_08bsm2a_final
Working on datasets_Mooring_08bsp2b_final
Working on datasets_Mooring_08bsp4a_final
Working on datasets_Mooring_08bsp4b_final
Working on datasets_Mooring_08bsp5b_final
Working on datasets_Mooring_08bsp8a_final
Working on datasets_Mooring_08bsp9a_final
Working on datasets_Mooring_08bst2a_final
Working on datasets_Mooring_08bsv5a_final
Working on datasets_Mooring_08bsv8a_final
Working on datasets_Mooring_08cb1a_final
Working on datasets_Mooring_08kc2a_final
Working on datasets_Mooring_08pa1a_final
Working on datasets_Mooring_08sbp1a_final
Working on datasets_Mooring_09bs2c_final
Working on datasets_Mooring_09bs5a_final
Working on datasets_Mooring_09bs5b_final
Working on datasets_Mooring_09bs8a_final
Working on datasets_Mooring_09bsm2a_final
Working on datasets_Mooring_09bsm4a_final
Working on datasets_Mooring_09bsp2a_final
Working on datasets_Mooring_09bsp2b_final
Working on datasets_Mooring_09bsp4a_final
Working on datasets_Mooring_09bsp5a_final
Working on datasets_Mooring_09bsp5b_final
Working on datasets_Mooring_09bsp9a_final
Working on datasets_Mooring_09bst2a_final
Working on datasets_Mooring_09bsv8a_final
Working on datasets_Mooring_09cb1a_final
Working on datasets_Mooring_09kc2a_final
Working on datasets_Mooring_09pa1a_final
Working on datasets_Mooring_09sbp1a_final
Working on datasets_Mooring_10bs2c_final
Working on datasets_Mooring_10bs4b_final
Working on datasets_Mooring_10bs5a_final
Working on datasets_Mooring_10bs5b_final
Working on datasets_Mooring_10bs8a_final
Working on datasets_Mooring_10bsm2a_final
Working on datasets_Mooring_10bsp2a_final
Working on datasets_Mooring_10bsp2b_final
Working on datasets_Mooring_10bsp5a_final
Working on datasets_Mooring_10bsp8a_final
Working on datasets_Mooring_10bst2a_final
Working on datasets_Mooring_10cb1a_final
Working on datasets_Mooring_10ckip1a_final
Working on datasets_Mooring_10ckip2a_final
Working on datasets_Mooring_10ckip3a_final
Working on datasets_Mooring_10ckp1a_final
Working on datasets_Mooring_10ckp2a_final
Working on datasets_Mooring_10ckp3a_final
Working on datasets_Mooring_10csp1a_final
Working on datasets_Mooring_10csp3a_final
Working on datasets_Mooring_10gl1a_final
Working on datasets_Mooring_10gl2a_final
Working on datasets_Mooring_10sep1a_final
Working on datasets_Mooring_10sep2a_final
Working on datasets_Mooring_10sep3a_final
Working on datasets_Mooring_10sep4a_final
Working on datasets_Mooring_11bs2c_final
Working on datasets_Mooring_11bs4a_final
Working on datasets_Mooring_11bs5a_final
Working on datasets_Mooring_11bs5b_final
Working on datasets_Mooring_11bs8a_final
Working on datasets_Mooring_11bsc2a_final
Working on datasets_Mooring_11bsm2a_final
Working on datasets_Mooring_11bsp2a_final
Working on datasets_Mooring_11bsp2b_final
Working on datasets_Mooring_11bsp4a_final
Working on datasets_Mooring_11bsp5a_final
Working on datasets_Mooring_11bsp5b_final
Working on datasets_Mooring_11bsp8a_final
Working on datasets_Mooring_11cb1a_final
Working on datasets_Mooring_11cb1b_final
Working on datasets_Mooring_11cbp3a_final
Working on datasets_Mooring_11ckip1a_final
Working on datasets_Mooring_11ckip2a_final
Working on datasets_Mooring_11ckip3a_final
Working on datasets_Mooring_11ckp1a_final
Working on datasets_Mooring_11ckp2a_final
Working on datasets_Mooring_11ckp3a_final
Working on datasets_Mooring_11cs12a_final
Working on datasets_Mooring_11cs13a_final
Working on datasets_Mooring_11csp11a_final
Working on datasets_Mooring_11gpp32a_final
Working on datasets_Mooring_11gpp34a_final
Working on datasets_Mooring_11gpp36a_final
Working on datasets_Mooring_11ip1a_final
Working on datasets_Mooring_11ipp2a_final
Working on datasets_Mooring_11kep41a_final
Working on datasets_Mooring_11pcp1a_final
Working on datasets_Mooring_11svp39a_final
Working on datasets_Mooring_12bs2c_final
Working on datasets_Mooring_12bs4a_final
Working on datasets_Mooring_12bs4b_final
Working on datasets_Mooring_12bs5a_final
Working on datasets_Mooring_12bs8a_final
Working on datasets_Mooring_12bsm2a_final
Working on datasets_Mooring_12bsp2a_final
Working on datasets_Mooring_12bsp2b_final
Working on datasets_Mooring_12bsp4a_final
Working on datasets_Mooring_12bsp4b_final
Working on datasets_Mooring_12bsp5a_final
Working on datasets_Mooring_12cb1a_final
Working on datasets_Mooring_12cb1b_final
Working on datasets_Mooring_12ckip2a_final
Working on datasets_Mooring_12ckip4a_final
Working on datasets_Mooring_12ckp2a_final
Working on datasets_Mooring_12ckp4a_final
Working on datasets_Mooring_12pa1a_final
Working on datasets_Mooring_13bs2c_final
Working on datasets_Mooring_13bs4a_final
Working on datasets_Mooring_13bs4b_final
Working on datasets_Mooring_13bs5a_final
Working on datasets_Mooring_13bs8a_final
Working on datasets_Mooring_13bsm2a_final
Working on datasets_Mooring_13bsp2a_final
Working on datasets_Mooring_13bsp2b_final
Working on datasets_Mooring_13bsp4a_final
Working on datasets_Mooring_13bsp8a_final
Working on datasets_Mooring_13cb1a_final
Working on datasets_Mooring_13ckp1a_final
Working on datasets_Mooring_13ckp2a_final
Working on datasets_Mooring_13ckp4a_final
Working on datasets_Mooring_13ckp5a_final
Working on datasets_Mooring_13ckp6a_final
Working on datasets_Mooring_13ckp7a_final
Working on datasets_Mooring_13cs12a_final
Working on datasets_Mooring_13cs13a_final
Working on datasets_Mooring_13csp11a_final
Working on datasets_Mooring_13csp2a_final
Working on datasets_Mooring_13csp3a_final
Working on datasets_Mooring_13gpp32a_final
Working on datasets_Mooring_13gpp34a_final
Working on datasets_Mooring_13gpp36a_final
Working on datasets_Mooring_13ipp2a_final
Working on datasets_Mooring_13kep41a_final
Working on datasets_Mooring_13pcp1a_final
Working on datasets_Mooring_13svp39a_final
Working on datasets_Mooring_14bs2c_final
Working on datasets_Mooring_14bs4b_final
Working on datasets_Mooring_14bs5a_final
Working on datasets_Mooring_14bs8a_final
Working on datasets_Mooring_14bsm2a_final
Working on datasets_Mooring_14bsp2a_final
Working on datasets_Mooring_14bsp5a_final
Working on datasets_Mooring_14bsp6a_final
Working on datasets_Mooring_14bsp8a_final
Working on datasets_Mooring_14ckp1a_final
Working on datasets_Mooring_14ckp2a_final
Working on datasets_Mooring_14ckp4a_final
Working on datasets_Mooring_14ckp5a_final
Working on datasets_Mooring_14ckp6a_final
Working on datasets_Mooring_14ckp7a_final
Working on datasets_Mooring_14ckp8a_final
Working on datasets_Mooring_14ckp9a_final
Working on datasets_Mooring_14ckt7a_final
Working on datasets_Mooring_14ubp1a_final
Working on datasets_Mooring_15bs2c_final
Working on datasets_Mooring_15bs4b_final
Working on datasets_Mooring_15bs5a_final
Working on datasets_Mooring_15bs8a_final
Working on datasets_Mooring_15bsm2a_final
Working on datasets_Mooring_15bsp2a_final
Working on datasets_Mooring_15bsp4a_final
Working on datasets_Mooring_15bsp8a_final
Working on datasets_Mooring_15cb1a_final
Working on datasets_Mooring_15ckp1a_final
Working on datasets_Mooring_15ckp2a_final
Working on datasets_Mooring_15ckp4a_final
Working on datasets_Mooring_15ckp9a_final
Working on datasets_Mooring_16bs2c_final
Working on datasets_Mooring_16bs4b_final
Working on datasets_Mooring_16bs5a_final
Working on datasets_Mooring_16bs8a_final
Working on datasets_Mooring_16bsm2a_final
Working on datasets_Mooring_16bsp2a_final
Working on datasets_Mooring_16bsp2b_final
Working on datasets_Mooring_16bsp5a_final
Working on datasets_Mooring_16bsp8a_final
Working on datasets_Mooring_16cb1a_final
Working on datasets_Mooring_16ckip2a_final
Working on datasets_Mooring_16ckp10a_final
Working on datasets_Mooring_16ckp11a_final
Working on datasets_Mooring_16ckp12a_final
Working on datasets_Mooring_16ckp1a_final
Working on datasets_Mooring_16ckp2a_final
Working on datasets_Mooring_16ckp3a_final
Working on datasets_Mooring_16ckp4a_final
Working on datasets_Mooring_16ckp5a_final
Working on datasets_Mooring_16ckp9a_final
Working on datasets_Mooring_17bs4b_final
Working on datasets_Mooring_17bs5a_final
Working on datasets_Mooring_17bs8a_final
###Markdown
Demo Script for ERDDAP transformations. Take Mooring Timeseries data and grid to 1hr so parameter(time,depth) - which is 1hr, 1m traditionally for EcoFOCI. Do not interpolate in depth. Use ERDDAP as the data source. Take CTD Collection of casts and grid? (is this useful - not really) **designed with akutan in mind**
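A minimal sketch of the per-dataset gridding step used in the loop below (assuming `df_m` is a pandas DataFrame indexed by time with a `depth` column, as returned by erddapy's `to_pandas`):
###Code
# Hedged sketch of the core 1-hour gridding idea; the full production loop appears further down
dfr = df_m.groupby('depth').resample('1H').mean()   # hourly means at each discrete instrument depth
dfr.index.names = ['depth', 'date']
xfr = dfr.drop('depth', axis=1).to_xarray()         # parameter(depth, date) grid, no interpolation in depth
xfr.to_netcdf('example_gridded.nc', encoding={'date': {'units': 'hours since 1970-01-01'}})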
###Code
from erddapy import ERDDAP
import pandas as pd
import numpy as np
import xarray as xa
server_url = 'http://ecofoci-field.pmel.noaa.gov:8080/erddap'
###Output
_____no_output_____
###Markdown
Loop through all datasets
###Code
e = ERDDAP(server=server_url)
df = pd.read_csv(e.get_search_url(response='csv', search_for='datasets_Mooring AND final'))
print(f"{df['Dataset ID'].count()} datasets to be looped through")
print("Names:", df['Dataset ID'].values)
from requests.exceptions import HTTPError
#build datasets.xml
import os
try:
os.remove("erddap_gridded_from_tabledap.xml")
except OSError:
pass
f=open("erddap_gridded_from_tabledap.xml", "a+")
output_path = '/home/akutan/bell/in_and_outbox/erddap_generated/gridded_data_from_tabledap/'
for dataset_id in sorted(df['Dataset ID'].values):
if (not 'itae' in dataset_id) and (not 'gridded' in dataset_id) and (not 'Met' in dataset_id):
print(f'Working on {dataset_id}')
#read and import dataset
#print(dataset_id)
try:
d = ERDDAP(server=server_url,
protocol='tabledap',
response='csv'
)
d.dataset_id=dataset_id
except HTTPError:
print('Failed to generate url {}'.format(dataset_id))
try:
df_m = d.to_pandas(
index_col='time (UTC)',
parse_dates=True,
skiprows=(1,) # units information can be dropped.
)
df_m.sort_index(inplace=True)
df_m.columns = [x[1].split()[0] for x in enumerate(df_m.columns)]
#-9999 and 1e35 are missing values... but erddap doesn't catch the -9999 yet
#and some 1e35 are off a bit
df_m.loc[(df_m.depth == -9999)] = np.nan # only in pressure field
except:
print(f"something failed in data download {dataset_id}")
pass
try:
dfr = df_m.groupby('depth').resample('1H').mean()
dfr.index.names = ['depth','date']
xfr = dfr.drop('depth',axis=1).to_xarray()
xfr['date'] = xfr.date.astype('datetime64[ns]')
xfr.to_netcdf(dataset_id+'.nc',encoding={'date':{'units':'hours since 1970-01-01'}})
upperID = dataset_id.split('_')[-2].upper()
timestart = str(xfr.date.min().values).replace('00.000000000','00Z')
#write out xml file
f.write(f"""
<dataset type="EDDGridFromNcFiles" datasetID="1hr_gridded_{dataset_id}" active="true">
<reloadEveryNMinutes>10080</reloadEveryNMinutes>
<fileDir>{output_path}</fileDir>
<fileNameRegex>{dataset_id}.nc</fileNameRegex>
<recursive>false</recursive>
<pathRegex>.*</pathRegex>
<metadataFrom>last</metadataFrom>
<matchAxisNDigits>20</matchAxisNDigits>
<fileTableInMemory>false</fileTableInMemory>
<accessibleViaFiles>false</accessibleViaFiles>
<!-- sourceAttributes>
</sourceAttributes -->
<addAttributes>
<att name="cdm_data_type">Grid</att>
<att name="Conventions">COARDS, CF-1.6, ACDD-1.3</att>
<att name="infoUrl">https://pmel.noaa.gov/</att>
<att name="institution">NOAA/PMEL - EcoFOCI</att>
<att name="keywords">gridded, active, available, chemistry, chlorophyll, Chlorophyll_Fluorescence, color, concentration, concentration_of_chlorophyll_in_sea_water, data, date, density, depth, dissolved, dissolved o2, downwelling, downwelling_photosynthetic_photon_radiance_in_sea_water, earth, Earth Science > Oceans > Ocean Chemistry > Chlorophyll, Earth Science > Oceans > Ocean Optics > Photosynthetically Active Radiation, Earth Science > Oceans > Ocean Optics > Radiance, Earth Science > Oceans > Salinity/Density > Salinity, latitude, local, longitude, number, O2, ocean, ocean color, oceans, optical, optical properties, optics, oxygen, Oxygen_Concentration, Oxygen_Saturation, PAR, photon, photosynthetic, photosynthetically, practical, pressure, properties, radiance, radiation, salinity, saturation, science, sea, sea_water_practical_salinity, seawater, serial, Serial_Number, source, statistics, temperature, water</att>
<att name="keywords_vocabulary">GCMD Science Keywords</att>
<att name="license">[standard]</att>
<att name="standard_name_vocabulary">CF Standard Name Table v55</att>
<att name="summary">QC'd (final) mooring data from {upperID}. This dataset has been gridded to 1hr resolution (with pressure left at discrete depths). It uses only datasets from the associated erddap tabular {dataset_id}. A depth field exisists for each known instrument depth, even if the parameter wasn't measured at that depth. </att>
<att name="title">Gridded Mooring Data from {upperID} - final</att>
</addAttributes>
<axisVariable>
<sourceName>depth</sourceName>
<destinationName>depth</destinationName>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Location</att>
<att name="long_name">Depth</att>
<att name="standard_name">depth</att>
<att name="units">m</att>
</addAttributes>
</axisVariable>
<axisVariable>
<sourceName>date</sourceName>
<destinationName>time</destinationName>
<!-- sourceAttributes>
<att name="calendar">proleptic_gregorian</att>
<att name="units">hours since 1970-01-01 00:00:00</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Time</att>
<att name="long_name">Date</att>
<att name="source_name">date</att>
<att name="standard_name">time</att>
<att name="units">hours since 1970-01-01 00:00:00</att>
</addAttributes>
</axisVariable>
<dataVariable>
<sourceName>latitude</sourceName>
<destinationName>latitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">90.0</att>
<att name="colorBarMinimum" type="double">-90.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Latitude</att>
<att name="standard_name">latitude</att>
<att name="units">degrees_north</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>longitude</sourceName>
<destinationName>longitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">180.0</att>
<att name="colorBarMinimum" type="double">-180.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Longitude</att>
<att name="standard_name">longitude</att>
<att name="units">degrees_east</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>temperature</sourceName>
<destinationName>temperature</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="epic_key">T_20</att>
<att name="long_name">Sea temperature in-situ ITS-90 scale</att>
<att name="standard_name">sea_water_temperature</att>
<att name="units">degree_C</att>
<att name="colorBarMaximum" type="double">20.0</att>
<att name="colorBarMinimum" type="double">-2.0</att>
<att name="ioos_category">Temperature</att>
<att name="standard_name">sea_water_temperature</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>pressure</sourceName>
<destinationName>pressure</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Sea Level</att>
<att name="long_name">Sea water pressure, equals 0 at sea-level</att>
<att name="standard_name">sea_water_pressure_due_to_sea_water</att>
<att name="units">dbar</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>salinity</sourceName>
<destinationName>salinity</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">37.0</att>
<att name="colorBarMinimum" type="double">32.0</att>
<att name="ioos_category">Salinity</att>
<att name="long_name">Sea Water Practical Salinity</att>
<att name="standard_name">sea_water_practical_salinity</att>
<att name="units">PSU</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>PAR</sourceName>
<destinationName>PAR</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">70.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Optical Properties</att>
<att name="long_name">Downwelling Photosynthetic Photon Radiance In Sea Water</att>
<att name="standard_name">downwelling_photosynthetic_photon_radiance_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Chlorophyll_Fluorescence</sourceName>
<destinationName>Chlorophyll_Fluorescence</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">30.0</att>
<att name="colorBarMinimum" type="double">0.03</att>
<att name="colorBarScale">Log</att>
<att name="ioos_category">Ocean Color</att>
<att name="long_name">Concentration Of Chlorophyll In Sea Water</att>
<att name="standard_name">concentration_of_chlorophyll_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Saturation</sourceName>
<destinationName>Oxygen_Saturation</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">percent</att>
<att name="standard_name">oxygen_saturation_over_air</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Concentration</sourceName>
<destinationName>Oxygen_Concentration</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">µmole/kg</att>
<att name="standard_name">volume_oxygen_in_solution_volume_of_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Serial_Number</sourceName>
<destinationName>Serial_Number</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Statistics</att>
<att name="long_name">Serial Number</att>
</addAttributes>
</dataVariable>
</dataset>
""")
except:
print('Something failed in gridding this set')
print("Done")
###Output
Working on datasets_Mooring_00bs2a_final
Working on datasets_Mooring_00bs2w_final
Working on datasets_Mooring_00bs3w_final
Working on datasets_Mooring_00bs4s_final
Working on datasets_Mooring_00bs4w_final
Working on datasets_Mooring_00bs6a_final
Working on datasets_Mooring_00bsm2a_final
Working on datasets_Mooring_00bsp2s_final
Working on datasets_Mooring_00bsp3s_final
Working on datasets_Mooring_00bsp4s_final
Working on datasets_Mooring_00cb1a_final
Working on datasets_Mooring_00kc1a_final
Working on datasets_Mooring_00kc2a_final
Working on datasets_Mooring_00pa1a_final
Working on datasets_Mooring_01bs2c_final
Working on datasets_Mooring_01bs4a_final
Working on datasets_Mooring_01bs4b_final
Working on datasets_Mooring_01bsm2a_final
Working on datasets_Mooring_01bsp2b_final
Working on datasets_Mooring_01bsp2s_final
Working on datasets_Mooring_01gbm3a_final
Working on datasets_Mooring_01gbm3b_final
Working on datasets_Mooring_02bs2c_final
Working on datasets_Mooring_02bs4a_final
Working on datasets_Mooring_02bs4b_final
Working on datasets_Mooring_02bsm2a_final
Working on datasets_Mooring_02bsp2a_final
Working on datasets_Mooring_02bsp2b_final
Working on datasets_Mooring_02bsp2c_final
Working on datasets_Mooring_03amp1a_final
Working on datasets_Mooring_03amp1b_final
Working on datasets_Mooring_03amp2a_final
Working on datasets_Mooring_03amp2b_final
Working on datasets_Mooring_03amp3a_final
Working on datasets_Mooring_03amp3b_final
Working on datasets_Mooring_03amp4a_final
Working on datasets_Mooring_03amp4b_final
Working on datasets_Mooring_03bs2a_final
Working on datasets_Mooring_03bs2c_final
Working on datasets_Mooring_03bs4a_final
Working on datasets_Mooring_03bs4b_final
Working on datasets_Mooring_03bsm2a_final
Working on datasets_Mooring_03bsp2a_final
Working on datasets_Mooring_03bsp2b_final
Working on datasets_Mooring_03bsp2c_final
Working on datasets_Mooring_03bsp6a_final
Working on datasets_Mooring_03cb1a_final
Working on datasets_Mooring_03cb1b_final
Working on datasets_Mooring_03gb1a_final
Working on datasets_Mooring_03gb1b_final
Working on datasets_Mooring_03gb2a_final
Working on datasets_Mooring_03gb2b_final
Working on datasets_Mooring_03gb4a_final
Working on datasets_Mooring_03gb4b_final
Something failed in gridding this set
Working on datasets_Mooring_03gbm3a_final
Working on datasets_Mooring_03gbm3b_final
Working on datasets_Mooring_03gbp12a_final
Working on datasets_Mooring_03gbp12b_final
Working on datasets_Mooring_03gbp3a_final
Working on datasets_Mooring_03gbp3b_final
Working on datasets_Mooring_03gbp5a_final
Working on datasets_Mooring_03gbp5b_final
Working on datasets_Mooring_03gp32a_final
Working on datasets_Mooring_03gp32b_final
Working on datasets_Mooring_03gp34a_final
Working on datasets_Mooring_03gp34b_final
Working on datasets_Mooring_03gpp36a_final
Working on datasets_Mooring_03gpp36b_final
Working on datasets_Mooring_03gsp6a_final
Working on datasets_Mooring_03gsp7a_final
Working on datasets_Mooring_03gsp8a_final
Working on datasets_Mooring_03kc1a_final
Working on datasets_Mooring_03kc2a_final
Working on datasets_Mooring_03pa1a_final
Working on datasets_Mooring_03sg2a_final
Something failed in gridding this set
Working on datasets_Mooring_03sg3a_final
Working on datasets_Mooring_03sg3b_final
Working on datasets_Mooring_03sg5a_final
Working on datasets_Mooring_03sg5b_final
Working on datasets_Mooring_03sgp1a_final
Working on datasets_Mooring_03sgp1b_final
Working on datasets_Mooring_03ssp1a_final
Working on datasets_Mooring_03ssp1b_final
Working on datasets_Mooring_03ssp2a_final
Working on datasets_Mooring_03ssp2b_final
Working on datasets_Mooring_03ssp3a_final
Working on datasets_Mooring_03ssp3b_final
Working on datasets_Mooring_04bs2c_final
Working on datasets_Mooring_04bs4a_final
Working on datasets_Mooring_04bs4b_final
Working on datasets_Mooring_04bsm2a_final
Working on datasets_Mooring_04bsp2a_final
Working on datasets_Mooring_04bsp2b_final
Working on datasets_Mooring_04bsp5a_final
Working on datasets_Mooring_04cb1a_final
Working on datasets_Mooring_04gb1a_final
Working on datasets_Mooring_04gbi2a_final
Working on datasets_Mooring_04gbm3a_final
Working on datasets_Mooring_04gbp3a_final
Working on datasets_Mooring_04gbp5a_final
Working on datasets_Mooring_04gbt1a_final
Working on datasets_Mooring_04gbt2a_final
Working on datasets_Mooring_04gbt4a_final
Working on datasets_Mooring_04gp32a_final
Working on datasets_Mooring_04gp34a_final
Working on datasets_Mooring_04gpp36a_final
Working on datasets_Mooring_04kc2a_final
Working on datasets_Mooring_04pa1a_final
Working on datasets_Mooring_04pi7a_final
Working on datasets_Mooring_04pi8a_final
Working on datasets_Mooring_04pip1a_final
Working on datasets_Mooring_04pip2a_final
Working on datasets_Mooring_04pip3a_final
Working on datasets_Mooring_04pip4a_final
Working on datasets_Mooring_04pip5a_final
Working on datasets_Mooring_04pip6a_final
Working on datasets_Mooring_04ssp2a_final
Working on datasets_Mooring_04ssp3a_final
Working on datasets_Mooring_04stl1a_final
Working on datasets_Mooring_04stl1b_final
Working on datasets_Mooring_05amp1a_final
Working on datasets_Mooring_05amp2a_final
Working on datasets_Mooring_05amp3a_final
Working on datasets_Mooring_05amp4a_final
Working on datasets_Mooring_05bs2c_final
Working on datasets_Mooring_05bs4a_final
Working on datasets_Mooring_05bs4b_final
Working on datasets_Mooring_05bs5a_final
Working on datasets_Mooring_05bs5b_final
Working on datasets_Mooring_05bs8a_final
Working on datasets_Mooring_05bs8b_final
Working on datasets_Mooring_05bsm2a_final
Working on datasets_Mooring_05bsp2a_final
Working on datasets_Mooring_05bsp2b_final
Working on datasets_Mooring_05bsp5b_final
Working on datasets_Mooring_05bsp8a_final
Working on datasets_Mooring_05bsp8b_final
Working on datasets_Mooring_05cb1a_final
Working on datasets_Mooring_05csp1a_final
Working on datasets_Mooring_05gbi1a_final
Working on datasets_Mooring_05kc1a_final
Working on datasets_Mooring_05kc2a_final
Working on datasets_Mooring_05pa1a_final
Working on datasets_Mooring_05ssp1a_final
Working on datasets_Mooring_05ssp2a_final
Working on datasets_Mooring_05ssp3a_final
Working on datasets_Mooring_06amp1a_final
Working on datasets_Mooring_06amp2a_final
Working on datasets_Mooring_06amp3a_final
Working on datasets_Mooring_06amp4a_final
Working on datasets_Mooring_06bs2c_final
Working on datasets_Mooring_06bs4a_final
Working on datasets_Mooring_06bs4b_final
Working on datasets_Mooring_06bs5a_final
Working on datasets_Mooring_06bs5b_final
Working on datasets_Mooring_06bs8a_final
Working on datasets_Mooring_06bsm2a_final
Working on datasets_Mooring_06bsp2a_final
Working on datasets_Mooring_06bsp2d_final
Working on datasets_Mooring_06bsp4a_final
Working on datasets_Mooring_06bsp5b_final
Working on datasets_Mooring_06bsp8a_final
Working on datasets_Mooring_06bst2a_final
Working on datasets_Mooring_06cb1a_final
Working on datasets_Mooring_06chp1a_final
Working on datasets_Mooring_06csp1a_final
Working on datasets_Mooring_06kc1a_final
Working on datasets_Mooring_06kc2a_final
Working on datasets_Mooring_06pa1a_final
Working on datasets_Mooring_06ssp1a_final
Working on datasets_Mooring_06ssp2a_final
Working on datasets_Mooring_06ssp3a_final
Working on datasets_Mooring_07amp2a_final
Working on datasets_Mooring_07amp3a_final
Working on datasets_Mooring_07amp4a_final
Working on datasets_Mooring_07bs2c_final
Working on datasets_Mooring_07bs4a_final
Working on datasets_Mooring_07bs4b_final
Working on datasets_Mooring_07bs5a_final
Working on datasets_Mooring_07bs5b_final
Working on datasets_Mooring_07bs8a_final
Working on datasets_Mooring_07bsm2a_final
Working on datasets_Mooring_07bsp4a_final
Working on datasets_Mooring_07bsp4b_final
Working on datasets_Mooring_07bsp5a_final
Working on datasets_Mooring_07bsp5b_final
Working on datasets_Mooring_07bsp8a_final
Working on datasets_Mooring_07bst2a_final
Working on datasets_Mooring_07cb1a_final
Working on datasets_Mooring_07kc2a_final
Working on datasets_Mooring_07pa1a_final
Working on datasets_Mooring_08amp1a_final
Working on datasets_Mooring_08amp2a_final
Working on datasets_Mooring_08amp3a_final
Working on datasets_Mooring_08amp4a_final
Working on datasets_Mooring_08bs2c_final
Working on datasets_Mooring_08bs4a_final
Working on datasets_Mooring_08bs4b_final
Working on datasets_Mooring_08bs5b_final
Working on datasets_Mooring_08bs8a_final
Working on datasets_Mooring_08bsm2a_final
Working on datasets_Mooring_08bsp2b_final
Working on datasets_Mooring_08bsp4a_final
Working on datasets_Mooring_08bsp4b_final
Working on datasets_Mooring_08bsp5b_final
Working on datasets_Mooring_08bsp8a_final
Working on datasets_Mooring_08bsp9a_final
Working on datasets_Mooring_08bst2a_final
Working on datasets_Mooring_08bsv5a_final
Working on datasets_Mooring_08bsv8a_final
Working on datasets_Mooring_08cb1a_final
Working on datasets_Mooring_08kc2a_final
Working on datasets_Mooring_08pa1a_final
Working on datasets_Mooring_08sbp1a_final
Working on datasets_Mooring_09bs2c_final
Working on datasets_Mooring_09bs5a_final
Working on datasets_Mooring_09bs5b_final
Working on datasets_Mooring_09bs8a_final
Working on datasets_Mooring_09bsm2a_final
Working on datasets_Mooring_09bsm4a_final
Working on datasets_Mooring_09bsp2a_final
Working on datasets_Mooring_09bsp2b_final
Working on datasets_Mooring_09bsp4a_final
Working on datasets_Mooring_09bsp5a_final
Working on datasets_Mooring_09bsp5b_final
Working on datasets_Mooring_09bsp9a_final
Working on datasets_Mooring_09bst2a_final
Working on datasets_Mooring_09bsv8a_final
Working on datasets_Mooring_09cb1a_final
Working on datasets_Mooring_09kc2a_final
Working on datasets_Mooring_09pa1a_final
Working on datasets_Mooring_09sbp1a_final
Working on datasets_Mooring_10bs2c_final
Working on datasets_Mooring_10bs4b_final
Working on datasets_Mooring_10bs5a_final
Working on datasets_Mooring_10bs5b_final
Working on datasets_Mooring_10bs8a_final
Working on datasets_Mooring_10bsm2a_final
Working on datasets_Mooring_10bsp2a_final
Working on datasets_Mooring_10bsp2b_final
Working on datasets_Mooring_10bsp5a_final
Working on datasets_Mooring_10bsp8a_final
Working on datasets_Mooring_10bst2a_final
Working on datasets_Mooring_10cb1a_final
Working on datasets_Mooring_10ckip1a_final
Working on datasets_Mooring_10ckip2a_final
Working on datasets_Mooring_10ckip3a_final
Working on datasets_Mooring_10ckp1a_final
Working on datasets_Mooring_10ckp2a_final
Working on datasets_Mooring_10ckp3a_final
Working on datasets_Mooring_10csp1a_final
Working on datasets_Mooring_10csp3a_final
Working on datasets_Mooring_10gl1a_final
Working on datasets_Mooring_10gl2a_final
Working on datasets_Mooring_10sep1a_final
Working on datasets_Mooring_10sep2a_final
Working on datasets_Mooring_10sep3a_final
Working on datasets_Mooring_10sep4a_final
Working on datasets_Mooring_11bs2c_final
Working on datasets_Mooring_11bs4a_final
Working on datasets_Mooring_11bs5a_final
Working on datasets_Mooring_11bs5b_final
Working on datasets_Mooring_11bs8a_final
Working on datasets_Mooring_11bsc2a_final
Working on datasets_Mooring_11bsm2a_final
Working on datasets_Mooring_11bsp2a_final
Working on datasets_Mooring_11bsp2b_final
Working on datasets_Mooring_11bsp4a_final
Working on datasets_Mooring_11bsp5a_final
Working on datasets_Mooring_11bsp5b_final
Working on datasets_Mooring_11bsp8a_final
Working on datasets_Mooring_11cb1a_final
Working on datasets_Mooring_11cb1b_final
Working on datasets_Mooring_11cbp3a_final
Working on datasets_Mooring_11ckip1a_final
Working on datasets_Mooring_11ckip2a_final
Working on datasets_Mooring_11ckip3a_final
Working on datasets_Mooring_11ckp1a_final
Working on datasets_Mooring_11ckp2a_final
Working on datasets_Mooring_11ckp3a_final
Working on datasets_Mooring_11cs12a_final
Working on datasets_Mooring_11cs13a_final
Working on datasets_Mooring_11csp11a_final
Working on datasets_Mooring_11csp1a_final
Working on datasets_Mooring_11csp2a_final
Working on datasets_Mooring_11csp3a_final
Working on datasets_Mooring_11gpp32a_final
Working on datasets_Mooring_11gpp34a_final
Working on datasets_Mooring_11gpp36a_final
Working on datasets_Mooring_11ip1a_final
Working on datasets_Mooring_11ipp2a_final
Working on datasets_Mooring_11kep41a_final
Working on datasets_Mooring_11pcp1a_final
Working on datasets_Mooring_11svp39a_final
Working on datasets_Mooring_12bs2c_final
Working on datasets_Mooring_12bs4a_final
Working on datasets_Mooring_12bs4b_final
Working on datasets_Mooring_12bs5a_final
Working on datasets_Mooring_12bs8a_final
Working on datasets_Mooring_12bsm2a_final
Working on datasets_Mooring_12bsp2a_final
Working on datasets_Mooring_12bsp2b_final
Working on datasets_Mooring_12bsp4a_final
Working on datasets_Mooring_12bsp4b_final
Working on datasets_Mooring_12bsp5a_final
Working on datasets_Mooring_12cb1a_final
Working on datasets_Mooring_12cb1b_final
Working on datasets_Mooring_12ckip2a_final
Working on datasets_Mooring_12ckip4a_final
Working on datasets_Mooring_12ckp2a_final
Working on datasets_Mooring_12ckp4a_final
Working on datasets_Mooring_12pa1a_final
Working on datasets_Mooring_13bs2c_final
Working on datasets_Mooring_13bs4a_final
Working on datasets_Mooring_13bs4b_final
Working on datasets_Mooring_13bs5a_final
Working on datasets_Mooring_13bs8a_final
Working on datasets_Mooring_13bsm2a_final
Working on datasets_Mooring_13bsp2a_final
Working on datasets_Mooring_13bsp2b_final
Working on datasets_Mooring_13bsp4a_final
Working on datasets_Mooring_13bsp8a_final
Working on datasets_Mooring_13cb1a_final
Working on datasets_Mooring_13ckp1a_final
Working on datasets_Mooring_13ckp2a_final
Working on datasets_Mooring_13ckp4a_final
Working on datasets_Mooring_13ckp5a_final
Working on datasets_Mooring_13ckp6a_final
Working on datasets_Mooring_13ckp7a_final
Working on datasets_Mooring_13cs12a_final
Working on datasets_Mooring_13cs13a_final
Working on datasets_Mooring_13csp11a_final
Working on datasets_Mooring_13csp1a_final
Working on datasets_Mooring_13csp2a_final
Working on datasets_Mooring_13csp3a_final
Working on datasets_Mooring_13gpp32a_final
Working on datasets_Mooring_13gpp34a_final
Working on datasets_Mooring_13gpp36a_final
Working on datasets_Mooring_13ipp2a_final
Working on datasets_Mooring_13kep41a_final
Working on datasets_Mooring_13pcp1a_final
Working on datasets_Mooring_13svp39a_final
Working on datasets_Mooring_14bs2c_final
Working on datasets_Mooring_14bs4b_final
Working on datasets_Mooring_14bs5a_final
Working on datasets_Mooring_14bs8a_final
Working on datasets_Mooring_14bsm2a_final
Working on datasets_Mooring_14bsp2a_final
Working on datasets_Mooring_14bsp5a_final
Working on datasets_Mooring_14bsp6a_final
Working on datasets_Mooring_14bsp8a_final
Working on datasets_Mooring_14ckp1a_final
Working on datasets_Mooring_14ckp2a_final
Working on datasets_Mooring_14ckp4a_final
Working on datasets_Mooring_14ckp5a_final
Working on datasets_Mooring_14ckp6a_final
Working on datasets_Mooring_14ckp7a_final
Working on datasets_Mooring_14ckp8a_final
Working on datasets_Mooring_14ckp9a_final
Working on datasets_Mooring_14ckt7a_final
Working on datasets_Mooring_14ubp1a_final
Working on datasets_Mooring_15bs2c_final
Working on datasets_Mooring_15bs4b_final
Working on datasets_Mooring_15bs5a_final
Working on datasets_Mooring_15bs8a_final
Working on datasets_Mooring_15bsm2a_final
Working on datasets_Mooring_15bsp2a_final
Working on datasets_Mooring_15bsp2b_final
Working on datasets_Mooring_15bsp4a_final
Working on datasets_Mooring_15bsp8a_final
Working on datasets_Mooring_15cb1a_final
Working on datasets_Mooring_15ckp1a_final
Working on datasets_Mooring_15ckp2a_final
Working on datasets_Mooring_15ckp4a_final
Working on datasets_Mooring_15ckp9a_final
Working on datasets_Mooring_16bs2c_final
Working on datasets_Mooring_16bs4b_final
Working on datasets_Mooring_16bs5a_final
Working on datasets_Mooring_16bs8a_final
Working on datasets_Mooring_16bsm2a_final
Working on datasets_Mooring_16bsp2a_final
Working on datasets_Mooring_16bsp2b_final
Working on datasets_Mooring_16bsp5a_final
Working on datasets_Mooring_16bsp8a_final
Working on datasets_Mooring_16cb1a_final
Working on datasets_Mooring_16ckip2a_final
Working on datasets_Mooring_16ckp10a_final
Working on datasets_Mooring_16ckp11a_final
Working on datasets_Mooring_16ckp12a_final
Working on datasets_Mooring_16ckp1a_final
Working on datasets_Mooring_16ckp2a_final
Working on datasets_Mooring_16ckp3a_final
Working on datasets_Mooring_16ckp4a_final
Working on datasets_Mooring_16ckp5a_final
Working on datasets_Mooring_16ckp9a_final
Working on datasets_Mooring_17bs4b_final
Working on datasets_Mooring_17bs5a_final
Working on datasets_Mooring_17bs8a_final
|
deepschool.io/Lesson 13 - Transfer Learning.ipynb | ###Markdown
Transfer Learning. With certain data types it is possible for the weights learned in one task to be **transferred** to another task. For example, a model trained to detect animals and vehicles in images (as done in CIFAR10) could be reused to classify dogs and cats. Transfer Learning is heavily used in image recognition and Natural Language Processing (NLP) related tasks. This tutorial is based on https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html.
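A minimal sketch of the idea in Keras (the rest of this notebook builds the same thing step by step; the layer sizes here are illustrative assumptions):
###Code
# Hedged sketch: reuse the ImageNet weights of VGG16 and train only a small new binary head
from keras import applications
from keras.models import Model
from keras.layers import Flatten, Dense

base = applications.VGG16(include_top=False, weights='imagenet', input_shape=(100, 100, 3))
for layer in base.layers:
    layer.trainable = False                      # keep the transferred weights fixed
x = Flatten()(base.output)
x = Dense(256, activation='relu')(x)
out = Dense(1, activation='sigmoid')(x)          # P(dog)
clf = Model(inputs=base.input, outputs=out)
clf.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])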
###Code
!pip install tqdm
!conda install -y Pillow
import numpy as np
import matplotlib.pyplot as plt
from urllib.request import urlretrieve
from os.path import isfile, isdir, getsize
from os import mkdir, makedirs, remove
from tqdm import tqdm
import zipfile
import pickle
from keras.models import Sequential, Model
from keras import optimizers
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from keras.preprocessing.image import ImageDataGenerator
import glob
import shutil
import pickle
%matplotlib inline
###Output
Using Theano backend.
###Markdown
Download and extract the doge and cate pictures.
###Code
catdog_dataset_folder_path = 'catdog'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('catdog.zip'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Doge n Cate Dataset') as pbar:
urlretrieve(
'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip',
'catdog.zip',
pbar.hook)
if not isdir(catdog_dataset_folder_path):
mkdir(catdog_dataset_folder_path)
with zipfile.ZipFile('catdog.zip') as f:
f.extractall('./'+catdog_dataset_folder_path)
# Unfortunately some of the files are corrupt so we need to clean these out:
!apt-get install -y jhead > /dev/null 2>&1
!jhead -de catdog/PetImages/Cat/*.jpg > /dev/null 2>&1
!jhead -de catdog/PetImages/Dog/*.jpg > /dev/null 2>&1
files = glob.glob(catdog_dataset_folder_path+'/PetImages/**/*.jpg')
labels = np.array([0]*12500+[1]*12500)
size = np.zeros(len(files))
for i,f in enumerate(files):
size[i] = getsize(f)
idx = np.where(size==0)[0]
for i in idx[::-1]:
del files[i]
labels = np.delete(labels, i)
###Output
_____no_output_____
###Markdown
In Keras we are required to place the training images in a certain folder, with the subfolders structured so that each subfolder contains the class. We will structure the validation folder in the same way:```data/ train/ dogs/ dog001.jpg dog002.jpg ... cats/ cat001.jpg cat002.jpg ... validation/ dogs/ dog001.jpg dog002.jpg ... cats/ cat001.jpg cat002.jpg ...``` From the dataset we randomly choose 20000 images and copy them to the training folder and the rest to the test folder.
###Code
len_data = len(files)
train_examples = 20000
test_examples = len_data - train_examples
# randomly choose 20000 as training and testing cases
permutation = np.random.permutation(len_data)
train_set = [files[i] for i in permutation[:][:train_examples]]
test_set = [files[i] for i in permutation[-test_examples:]]
train_labels = labels[permutation[:train_examples]]
test_labels = labels[permutation[-test_examples:]]
train_folder = catdog_dataset_folder_path+'/train'
test_folder = catdog_dataset_folder_path+'/test'
if isdir(train_folder): #if directory already exists
shutil.rmtree(train_folder)
if isdir(test_folder): #if directory already exists
shutil.rmtree(test_folder)
makedirs(train_folder+'/cat/')
makedirs(train_folder+'/dog/')
makedirs(test_folder+'/cat/')
makedirs(test_folder+'/dog/')
for f,i in zip(train_set, train_labels):
if i==0:
shutil.copy2(f, train_folder+'/cat/')
else:
shutil.copy2(f, train_folder+'/dog/')
for f,i in zip(test_set, test_labels):
if i==0:
shutil.copy2(f, test_folder+'/cat/')
else:
shutil.copy2(f, test_folder+'/dog/')
###Output
_____no_output_____
###Markdown
View some sample images:
###Code
datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=5,
zoom_range=0.2,
horizontal_flip=True)
img_height = img_width = 100
channels = 3
train_generator = datagen.flow_from_directory(
train_folder,
color_mode = "rgb",
target_size=(img_height, img_width),
batch_size=1,
class_mode=None)
i = 0
img_list = []
for batch in train_generator: #.flow(x, batch_size=1)
img_list.append(batch)
i += 1
if i > 5:
break
for img in img_list:
plt.imshow(np.squeeze(img))
plt.show()
###Output
Found 20000 images belonging to 2 classes.
###Markdown
Basic logistic classification: Always, ALWAYS compare to the most basic possible ML/statistics algorithm. In this case, logistic regression.
###Code
batch_size = 1000
train_generator = datagen.flow_from_directory(
train_folder,
color_mode = "rgb",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
x_train, y_train = next(train_generator)
x_test, y_test = next(train_generator)
from sklearn.linear_model import LogisticRegression
logistic = LogisticRegression()
logistic.fit(x_train.reshape(batch_size,-1), y_train)
y_pred = logistic.predict(x_test.reshape(len(x_test), -1))
y_pred[:10]
###Output
_____no_output_____
###Markdown
Predicting the probabilities for the first 3 images:
###Code
logistic.predict_proba(x_test[:3].reshape(3,-1))
###Output
_____no_output_____
###Markdown
Accuracy of the predictions:
###Code
np.count_nonzero(y_pred == y_test)/len(y_test)
###Output
_____no_output_____
###Markdown
Convolutional Neural Networks (CNN)
###Code
model = Sequential()
# TODO: Add a CNN:
# Note 1: The input_shape needs to be specified in this case (input_height, input_width, channels)
# Note 2: The order usually goes Conv2D, Activation, MaxPool,
# Note 3: Must be flattened before passing onto Dense layers
# Note 4: The loss is binary_crossentropy
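# One possible (hypothetical) way to fill in the TODO above -- a sketch, not the course's official solution:
# model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(img_height, img_width, channels)))
# model.add(MaxPool2D((2, 2)))
# model.add(Conv2D(64, (3, 3), activation='relu'))
# model.add(MaxPool2D((2, 2)))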
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
batch_size = 128
train_generator = datagen.flow_from_directory(
train_folder,
color_mode = "rgb",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(train_generator, train_examples//batch_size, epochs=2)
batch_size = 1
test_generator = datagen.flow_from_directory(
test_folder,
color_mode = "rgb",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary',
shuffle=False)
y_pred = model.predict_generator(test_generator, test_examples//batch_size, workers=4)
# model.predict_classes(test_x)
# np.count_nonzero(y_pred == test_y)/len(test_y)
correct = 0
for i, f in enumerate(test_generator.filenames):
if f.startswith('cat') and y_pred[i]<0.5:
correct +=1
if f.startswith('dog') and y_pred[i]>=0.5:
correct +=1
print('Correct predictions: '+str(correct/len(test_generator.filenames)))
batch_size = 6
test_generator = datagen.flow_from_directory(
test_folder,
color_mode = "rgb",
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary',
shuffle=True)
x_test, y_test = next(test_generator)
p = model.predict(x_test)
p = np.hstack([1-p, p])  # columns: [P(cat), P(dog)] for the images predicted above
label_dict = {0: 'cat', 1: 'dog'}
plt.figure(figsize=(12,12))
for i in range(batch_size):
plt.subplot(batch_size,2,2*i+1)
plt.imshow(x_test[i])
plt.title(label_dict[y_test[i]])
plt.subplot(batch_size,2,2*i+2)
plt.bar(range(2),p[i])
plt.xticks(range(2), ['cat', 'dog'])
# plt.show()
plt.show()
p
###Output
_____no_output_____
###Markdown
Transfer Learning - Part 1
###Code
from keras import applications
datagen = ImageDataGenerator(rescale=1.0/255)
model = applications.VGG16(include_top=False, input_shape=(img_width, img_height, channels))
model = applications.VGG16(include_top=False, weights='imagenet')
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) (None, None, None, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, None, None, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, None, None, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, None, None, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, None, None, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, None, None, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, None, None, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, None, None, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, None, None, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, None, None, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, None, None, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, None, None, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, None, None, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, None, None, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, None, None, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, None, None, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, None, None, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, None, None, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, None, None, 512) 0
=================================================================
Total params: 14,714,688.0
Trainable params: 14,714,688.0
Non-trainable params: 0.0
_________________________________________________________________
###Markdown
**Do not _uncomment_ and run the following two blocks unless absolutely necessary**. It takes almost one hour to run. It took me a while to understand why in the Keras blog they had saved the parameters. It isn't necessary for you to save it. However, if you do come back to the tutorial you probably don't want to run this section again. It is slow mainly because there are 14 million parameters to go through for each example. Having a GPU in this instance would help tremendously. Note 1: It is, however, important to notice that I am **not** training in this block. I am predicting using a truncated VGG16 net. See how I set the `include_top=False` parameter above. VGG16 was originally trained on the ImageNet dataset so that it would predict 1000 classes. Now that we are truncating it and only using all but the top few layers (the layers closest to the prediction), it outputs a (3,3,512) feature map in our case.
###Code
batch_size = 128
generator = datagen.flow_from_directory(
train_folder,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
# bottleneck_features_train = model.predict_generator(generator, train_examples//batch_size, verbose=1, workers=4)
# pickle.dump(bottleneck_features_train, open('bottleneck_features_train.npy', 'wb'))
# bottleneck_features_train.shape
batch_size = 128
valid_generator = datagen.flow_from_directory(
test_folder,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
# bottleneck_features_valid = model.predict_generator(generator, test_examples//batch_size, verbose=1, workers=4)
# with open('bottleneck_features_valid.npy', 'wb') as f:
# pickle.dump(bottleneck_features_valid, f)
# bottleneck_features_valid.shape
with open('bottleneck_features_train.npy','rb') as f:
bottleneck_features_train = pickle.load(f)
model = Sequential()
# TODO: Make a 1 hidden layer NN
# Note 1: Add Flatten() layer The input shape is the bottleneck features dimension
# Note 2: Choose a suitable dimension for the hidden layer (eg. half way between final node and dimension of input)
# Note 3: Last layer is 1 with activation sigmoid (remember we are trying to predict a probability)
# Note 4: See previous todo section to see what the loss is supposed to be (unless you know already of course)
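# One possible (hypothetical) way to fill in the TODO above -- a sketch, not the course's official solution:
# model.add(Flatten(input_shape=bottleneck_features_train.shape[1:]))
# model.add(Dense(256, activation='relu'))
# model.add(Dense(1, activation='sigmoid'))
# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])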
model.summary()
batch_size = 128
generator = datagen.flow_from_directory(
train_folder,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
labels = np.array([0 if f.startswith('cat') else 1 for f in generator.filenames])[:len(bottleneck_features_train)]
model.fit(bottleneck_features_train, labels, epochs=10, batch_size=batch_size)
with open('bottleneck_features_valid.npy','rb') as f:
bottleneck_features_valid = pickle.load(f)
valid_labels = np.array([0 if f.startswith('cat') else 1 for f in valid_generator.filenames])[:len(bottleneck_features_valid)]
y_valid_pred = model.predict_classes(bottleneck_features_valid)
accuracy = np.count_nonzero(valid_labels == y_valid_pred.ravel())/len(valid_labels)
print('\nThe accuracy is: '+str(accuracy))
from PIL import Image
img = Image.open('doge.jpg')
img.thumbnail((img_height, img_width), Image.ANTIALIAS)
###Output
_____no_output_____
###Markdown
Transfer Learning - Part 2. We can refine the model further by adjusting the last convolutional layer. Note that vgg16 is of type `Model` and not `Sequential`. Hence we cannot `add` the top layer as suggested in the Keras blog. Note 2: We are setting the trainable weights to be everything but the last convolutional layer and the fully connected (dense) layers. Take note of the number of trainable parameters in the summary below.
###Code
vgg16 = applications.VGG16(include_top=False, weights='imagenet', input_shape=(img_width, img_height, channels))
combinedModel = Model(inputs= vgg16.input, outputs= model(vgg16.output))
for layer in combinedModel.layers[:-3]:
layer.trainable = False
combinedModel.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) (None, 100, 100, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 100, 100, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 100, 100, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 50, 50, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 50, 50, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 50, 50, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 25, 25, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 25, 25, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 25, 25, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 25, 25, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 12, 12, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 12, 12, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 12, 12, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 12, 12, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 6, 6, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 6, 6, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 6, 6, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 6, 6, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 3, 3, 512) 0
_________________________________________________________________
sequential_1 (Sequential) (None, 1) 1180161
=================================================================
Total params: 15,894,849.0
Trainable params: 3,539,969.0
Non-trainable params: 12,354,880.0
_________________________________________________________________
###Markdown
You can try to use the `adagrad` optimizer if you wish, but you'll soon see that all the progress that was made in `model` will be undone. It will in fact overwrite the weights in `model` and you would have to rerun the `model` training from the `bottleneck_features` section. Why use a small learning rate here? It's so that the updates are small and do not destabilise the weights that were previously learnt.
###Code
model.save_weights('fc_model.h5')
combinedModel.compile(loss='binary_crossentropy',
optimizer = optimizers.RMSprop(lr=1e-4, decay=0.9), # optimizers.SGD(lr=1e-4, momentum=0.9)
metrics=['accuracy'])
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_folder,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
test_folder,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
# fine-tune the model
combinedModel.fit_generator(
train_generator,
steps_per_epoch=train_examples//batch_size,
epochs=5,
validation_data=validation_generator,
validation_steps=test_examples//batch_size) # len(valid_generator.filenames)
# fine-tune the model
combinedModel.fit_generator(
train_generator,
steps_per_epoch=train_examples//batch_size,
epochs=5,
validation_data=validation_generator,
validation_steps=test_examples//batch_size) # len(valid_generator.filenames)
###Output
Epoch 1/5
59/156 [==========>...................] - ETA: 1636s - loss: 0.3259 - acc: 0.8567
###Markdown
Predictions:
###Code
from PIL import Image
img = Image.open('doge.jpg')
img = np.asarray(img.resize((img_height, img_width), Image.ANTIALIAS))/255
plt.imshow(img)
plt.show()
p = combinedModel.predict(np.array([img]))
print('The probability that this is a doge is: ' +str(p[0][0]))
img = Image.open('grumpy_cat.jpeg')
img = np.asarray(img.resize((img_height, img_width), Image.ANTIALIAS))/255
plt.imshow(img)
plt.show()
p = combinedModel.predict(np.array([img]))
print('The probability that this is a doge is: ' +str(p[0][0]))
###Output
_____no_output_____ |
tutorials/Appendix B Quick topics/QComponent - 3-fingers capacitor.ipynb | ###Markdown
QComponent - 3-fingers capacitor Standard imports
###Code
%reload_ext autoreload
%autoreload 2
%config IPCompleter.greedy=True
# Qiskit Metal
from qiskit_metal import designs
from qiskit_metal import MetalGUI, Dict
###Output
_____no_output_____
###Markdown
Define design object from design_planar and start GUI
###Code
design = designs.DesignPlanar()
gui = MetalGUI(design)
# enable rebuild of the same component
design.overwrite_enabled = True
###Output
_____no_output_____
###Markdown
Import the device and inspect the default options
###Code
from qiskit_metal.qlibrary.lumped.cap_3_interdigital import Cap3Interdigital
Cap3Interdigital.get_template_options(design)
###Output
_____no_output_____
###Markdown
Instantiate the capacitive device
###Code
design.delete_all_components()
c1 = Cap3Interdigital(design, 'C1')
gui.rebuild()
gui.autoscale()
###Output
_____no_output_____
###Markdown
Change something about it
###Code
c1.options['finger_length'] = '200um'
gui.rebuild()
gui.autoscale()
###Output
_____no_output_____ |
chapter2-6/chapter6_parallelism.ipynb | ###Markdown
Import the required modules.
###Code
import numpy as np
import tensorflow as tf
import datetime
###Output
_____no_output_____
###Markdown
Compute the 10th power.
###Code
n = 10
###Output
_____no_output_____
###Markdown
Create two matrices containing 1000x1000 random numbers.
###Code
A = np.random.rand(1000, 1000).astype('float32')
B = np.random.rand(1000, 1000).astype('float32')
A.shape, B.shape
###Output
_____no_output_____
###Markdown
Create two lists to store the results.
###Code
c1 = []
c2 = []
###Output
_____no_output_____
###Markdown
Define a function matpow that computes the matrix power using recursion.
###Code
def matpow(M, n):
if n < 1: #Abstract cases where n < 1
return M
else:
return tf.matmul(M, matpow(M, n-1))
###Output
_____no_output_____
###Markdown
If you do not have a GPU, change '/gpu:0' below to '/cpu:0'.
###Code
with tf.device('/gpu:0'):
a = tf.constant(A)
b = tf.constant(B)
#compute A^n and B^n and store results in c1
c1.append(matpow(a, n))
c1.append(matpow(b, n))
###Output
_____no_output_____
###Markdown
Use the CPU to add up all the elements of c1.
###Code
with tf.device('/cpu:0'):
sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n
###Output
_____no_output_____
###Markdown
Create a session and run the graph. The device placement log appears in the shell where the Jupyter notebook was launched.
###Code
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
# Runs the op.
print(sess.run(sum))
t2_1 = datetime.datetime.now()
print("Single CPU computation time: " + str(t2_1-t1_1))
###Output
Single CPU computation time: 0:00:00.736783
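###Markdown
For reference, here is a sketch of how the same computation could be split across two devices; this assumes a machine with two GPUs ('/gpu:0' and '/gpu:1') and is not part of the original notebook run.
###Code
# Hedged sketch: place each matrix power on its own GPU and sum the results on the CPU
c2 = []
with tf.device('/gpu:0'):
    a = tf.constant(A)
    c2.append(matpow(a, n))
with tf.device('/gpu:1'):
    b = tf.constant(B)
    c2.append(matpow(b, n))
with tf.device('/cpu:0'):
    sum2 = tf.add_n(c2)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    print(sess.run(sum2))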
###Markdown
Import the required modules.
###Code
import numpy as np
import tensorflow as tf
import datetime
###Output
_____no_output_____
###Markdown
Compute the 10th power.
###Code
n = 10
###Output
_____no_output_____
###Markdown
Create two matrices containing 1000x1000 random numbers.
###Code
A = np.random.rand(1000, 1000).astype('float32')
B = np.random.rand(1000, 1000).astype('float32')
A.shape, B.shape
###Output
_____no_output_____
###Markdown
Create two lists to store the results.
###Code
c1 = []
c2 = []
###Output
_____no_output_____
###Markdown
Define a function matpow that computes the matrix power using recursion.
###Code
def matpow(M, n):
if n < 1: #Abstract cases where n < 1
return M
else:
return tf.matmul(M, matpow(M, n-1))
###Output
_____no_output_____
###Markdown
If you have a GPU, change '/cpu:0' below to '/gpu:1'.
###Code
with tf.device('/cpu:0'):
a = tf.constant(A)
b = tf.constant(B)
#compute A^n and B^n and store results in c1
c1.append(matpow(a, n))
c1.append(matpow(b, n))
###Output
_____no_output_____
###Markdown
Use the CPU to add up all the elements of c1.
###Code
with tf.device('/cpu:0'):
sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n
###Output
_____no_output_____
###Markdown
Create a session and run the graph. The device placement log appears in the shell where the Jupyter notebook was launched.
###Code
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
# Runs the op.
print(sess.run(sum))
t2_1 = datetime.datetime.now()
print("Single CPU computation time: " + str(t2_1-t1_1))
###Output
Single CPU computation time: 0:00:00.475806
###Markdown
Import the required modules.
###Code
import numpy as np
import tensorflow as tf
import datetime
###Output
_____no_output_____
###Markdown
Compute the 10th power.
###Code
n = 10
###Output
_____no_output_____
###Markdown
Create two matrices containing 1000x1000 random numbers.
###Code
A = np.random.rand(1000, 1000).astype('float32')
B = np.random.rand(1000, 1000).astype('float32')
A.shape, B.shape
###Output
_____no_output_____
###Markdown
Create two lists to store the results.
###Code
c1 = []
c2 = []
###Output
_____no_output_____
###Markdown
Define a function matpow that computes the matrix power using recursion.
###Code
def matpow(M, n):
if n < 1: #Abstract cases where n < 1
return M
else:
return tf.matmul(M, matpow(M, n-1))
###Output
_____no_output_____
###Markdown
If you do not have a GPU, change '/gpu:0' below to '/cpu:0'.
###Code
with tf.device('/gpu:0'):
a = tf.constant(A)
b = tf.constant(B)
#compute A^n and B^n and store results in c1
c1.append(matpow(a, n))
c1.append(matpow(b, n))
###Output
_____no_output_____
###Markdown
Use the CPU to add up all the elements of c1.
###Code
with tf.device('/cpu:0'):
sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n
###Output
_____no_output_____
###Markdown
Create a session and run the graph. The device placement log appears in the shell where the Jupyter notebook was launched.
###Code
t1_1 = datetime.datetime.now()
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
# Runs the op.
print(sess.run(sum))
t2_1 = datetime.datetime.now()
print("Single CPU computation time: " + str(t2_1-t1_1))
###Output
Single CPU computation time: 0:00:00.655085
|
nbs/12_cluster_analysis/003_02-hc-umap.ipynb | ###Markdown
Description Runs hierarchical clustering on the umap version of the data. Environment variables
###Code
from IPython.display import display
import conf
N_JOBS = conf.GENERAL["N_JOBS"]
display(N_JOBS)
%env MKL_NUM_THREADS=$N_JOBS
%env OPEN_BLAS_NUM_THREADS=$N_JOBS
%env NUMEXPR_NUM_THREADS=$N_JOBS
%env OMP_NUM_THREADS=$N_JOBS
###Output
env: MKL_NUM_THREADS=2
env: OPEN_BLAS_NUM_THREADS=2
env: NUMEXPR_NUM_THREADS=2
env: OMP_NUM_THREADS=2
###Markdown
Modules loading
###Code
%load_ext autoreload
%autoreload 2
from pathlib import Path
import numpy as np
import pandas as pd
from utils import generate_result_set_name
###Output
_____no_output_____
###Markdown
Settings
###Code
np.random.seed(0)
###Output
_____no_output_____
###Markdown
Input data
###Code
INPUT_SUBSET = "umap"
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
# parameters of the dimensionality reduction steps
DR_OPTIONS = {
"n_components": 50,
"metric": "euclidean",
"n_neighbors": 15,
"random_state": 0,
}
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
###Output
_____no_output_____
###Markdown
Clustering
###Code
from sklearn.cluster import AgglomerativeClustering
CLUSTERING_ATTRIBUTES_TO_SAVE = ["n_clusters"]
CLUSTERING_OPTIONS = {}
CLUSTERING_OPTIONS["K_MIN"] = 2
CLUSTERING_OPTIONS["K_MAX"] = 75 # sqrt(3749) + some more to get closer to 295
CLUSTERING_OPTIONS["LINKAGE"] = {"ward", "complete", "average", "single"}
CLUSTERING_OPTIONS["AFFINITY"] = "euclidean"
display(CLUSTERING_OPTIONS)
CLUSTERERS = {}
idx = 0
for k in range(CLUSTERING_OPTIONS["K_MIN"], CLUSTERING_OPTIONS["K_MAX"] + 1):
for linkage in CLUSTERING_OPTIONS["LINKAGE"]:
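        # scikit-learn's ward linkage only works with euclidean affinity;
        # the remaining linkages consume the precomputed pairwise-distance matrix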
if linkage == "ward":
affinity = "euclidean"
else:
affinity = "precomputed"
clus = AgglomerativeClustering(
n_clusters=k,
affinity=affinity,
linkage=linkage,
)
method_name = type(clus).__name__
CLUSTERERS[f"{method_name} #{idx}"] = clus
idx = idx + 1
display(len(CLUSTERERS))
_iter = iter(CLUSTERERS.items())
display(next(_iter))
display(next(_iter))
clustering_method_name = method_name
display(clustering_method_name)
###Output
_____no_output_____
###Markdown
Output directory
###Code
# output dir for this notebook
RESULTS_DIR = Path(
conf.RESULTS["CLUSTERING_RUNS_DIR"],
f"{INPUT_SUBSET}-{INPUT_STEM}",
).resolve()
RESULTS_DIR.mkdir(parents=True, exist_ok=True)
display(RESULTS_DIR)
###Output
_____no_output_____
###Markdown
Load input file
###Code
data = pd.read_pickle(input_filepath)
data.shape
data.head()
assert not data.isna().any().any()
###Output
_____no_output_____
###Markdown
Clustering Generate ensemble
###Code
from sklearn.metrics import pairwise_distances
from clustering.ensembles.utils import generate_ensemble
data_dist = pairwise_distances(data, metric=CLUSTERING_OPTIONS["AFFINITY"])
data_dist.shape
pd.Series(data_dist.flatten()).describe().apply(str)
ensemble = generate_ensemble(
data_dist,
CLUSTERERS,
attributes=CLUSTERING_ATTRIBUTES_TO_SAVE,
affinity_matrix=data_dist,
)
# the number should be close to 295 (the number of partitions generated by k-means/spectral clustering)
ensemble.shape
ensemble.head()
ensemble["n_clusters"].value_counts().head()
ensemble_stats = ensemble["n_clusters"].describe()
display(ensemble_stats)
###Output
_____no_output_____
###Markdown
Testing
###Code
assert ensemble_stats["min"] > 1
assert not ensemble["n_clusters"].isna().any()
assert ensemble.shape[0] == len(CLUSTERERS)
# all partitions have the right size
assert np.all(
[part["partition"].shape[0] == data.shape[0] for idx, part in ensemble.iterrows()]
)
# no partition has negative clusters (noisy points)
assert not np.any([(part["partition"] < 0).any() for idx, part in ensemble.iterrows()])
###Output
_____no_output_____
###Markdown
Save
###Code
del CLUSTERING_OPTIONS["LINKAGE"]
output_filename = Path(
RESULTS_DIR,
generate_result_set_name(
CLUSTERING_OPTIONS,
prefix=f"{clustering_method_name}-",
suffix=".pkl",
),
).resolve()
display(output_filename)
ensemble.to_pickle(output_filename)
###Output
_____no_output_____ |
_doc/notebooks/r2python.ipynb | ###Markdown
Convert an R script into PythonThis notebook introduces the function [r2python](find://code-r2python) which converts *R* into *Python*. It does not work for everything; it is being improved every time it is needed.
###Code
rscript = """
nb=function(y=1930){
debut=1816
MatDFemale=matrix(D$Female,nrow=111)
colnames(MatDFemale)=(debut+0):198
cly=(y-debut+1):111
deces=diag(MatDFemale[,cly[cly%in%1:199]])
return(c(B$Female[B$Year==y],deces))}
"""
from pyensae.languages import r2python
print(r2python(rscript, pep8=True))
###Output
def nb(y=1930):
debut = 1816
MatDFemale = matrix(D . Female, nrow=111)
colnames(MatDFemale) .set(range((debut + 0), 198))
cly = range((y - debut + 1), 111)
deces = diag(MatDFemale[:, cly[set(cly) & set(range(1, 199))]])
return tuple(B . Female[B . Year == y], deces)
|
how-to-use-azureml/ml-frameworks/using-mlflow/train-and-deploy-pytorch/train-and-deploy-pytorch.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image ClassifierThis example shows you how to use MLflow together with Azure Machine Learning services for tracking the metrics and artifacts while training a PyTorch model to classify MNIST digit images and deploy the model as a web service. You'll learn how to: 1. Set up MLflow tracking URI so as to use Azure ML 2. Create experiment 3. Instrument your model with MLflow tracking 4. Train a PyTorch model locally 5. Train a model on GPU compute on Azure 6. View your experiment within your Azure ML Workspace in Azure Portal 7. Deploy the model as a web service on Azure Container Instance 8. Call the model to make predictions Pre-requisites If you are using a Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipnyb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met.Install PyTorch, this notebook has been tested with torch==1.4Also, install azureml-mlflow package using ```pip install azureml-mlflow```. Note that azureml-mlflow installs mlflow package itself as a dependency if you haven't done so previously. Set-upImport packages and check versions of Azure ML SDK and MLflow installed on your computer. Then connect to your Workspace.
###Code
import sys, os
import mlflow
import mlflow.azureml
import azureml.core
from azureml.core import Workspace
print("SDK version:", azureml.core.VERSION)
print("MLflow version:", mlflow.version.VERSION)
ws = Workspace.from_config()
ws.get_details()
###Output
_____no_output_____
###Markdown
Set tracking URISet the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
###Code
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
###Output
_____no_output_____
###Markdown
Create ExperimentIn both MLflow and Azure ML, training runs are grouped into experiments. Let's create one for our experimentation.
###Code
experiment_name = "pytorch-with-mlflow"
mlflow.set_experiment(experiment_name)
###Output
_____no_output_____
###Markdown
Train model locally while logging metrics and artifactsThe ```scripts/train.py``` program contains the code to load the image dataset, train and test the model. Within this program, the train.driver function wraps the end-to-end workflow.Within the driver, the ```mlflow.start_run``` starts MLflow tracking. Then, ```mlflow.log_metric``` functions are used to track the convergence of the neural network training iterations. Finally ```mlflow.pytorch.save_model``` is used to save the trained model in framework-aware manner.Let's add the program to search path, import it as a module and invoke the driver function. Note that the training can take few minutes.
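The actual `scripts/train.py` is not reproduced in this notebook, so the next cell is only an added, hypothetical sketch of how such a driver is typically instrumented with MLflow (the stand-in model and the metric name are placeholders, not the real script).
###Code
import mlflow
import mlflow.pytorch
import torch
def driver_sketch(epochs=3):
    # Hypothetical stand-in model; the real script builds a CNN for MNIST
    model = torch.nn.Linear(784, 10)
    # Start an MLflow run; with the Azure ML tracking URI set above, everything
    # logged here is recorded against the workspace experiment
    with mlflow.start_run():
        for epoch in range(epochs):
            # The real driver computes the training loss here; this placeholder
            # value only illustrates the logging call
            loss = 1.0 / (epoch + 1)
            mlflow.log_metric("epoch_loss", loss, step=epoch)
        # Save the trained model in a PyTorch (framework-aware) format
        mlflow.pytorch.save_model(model, "model_sketch")
###Output
_____no_output_____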
###Code
lib_path = os.path.abspath("scripts")
sys.path.append(lib_path)
import train
run = train.driver()
###Output
_____no_output_____
###Markdown
Train model on GPU compute on AzureNext, let's run the same script on GPU-enabled compute for faster training. If you've completed the [Configuration](../../../configuration.ipynb) notebook, you should have a GPU cluster named "gpu-cluster" available in your workspace. Otherwise, follow the instructions in the notebook to create one. For simplicity, this example uses a single process on a single VM to train the model.Clone an environment object from the PyTorch 1.4 Azure ML curated environment. Azure ML curated environments are pre-configured environments to simplify ML setup, reference [this doc](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-environmentsuse-a-curated-environment) for more information. To enable MLflow tracking, add ```azureml-mlflow``` as a pip package.
###Code
from azureml.core import Environment
env = Environment.get(workspace=ws, name="AzureML-PyTorch-1.4-GPU").clone("mlflow-env")
env.python.conda_dependencies.add_pip_package("azureml-mlflow")
env.python.conda_dependencies.add_pip_package("Pillow==6.0.0")
###Output
_____no_output_____
###Markdown
Create a ScriptRunConfig to specify the training configuration: script, compute as well as environment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory="./scripts", script="train.py")
src.run_config.environment = env
src.run_config.target = "gpu-cluster"
###Output
_____no_output_____
###Markdown
Get a reference to the experiment you created previously, but this time, as an Azure Machine Learning experiment object.Then, use the ```Experiment.submit``` method to start the remote training run. Note that the first training run often takes longer as Azure Machine Learning service builds the Docker image for executing the script. Subsequent runs will be faster as the cached image is used.
###Code
from azureml.core import Experiment
exp = Experiment(ws, experiment_name)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
You can monitor the run and its metrics on Azure Portal.
###Code
run
###Output
_____no_output_____
###Markdown
Also, you can wait for run to complete.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Deploy model as web serviceThe ```client.create_deployment``` function registers the logged PyTorch model and deploys the model in a framework-aware manner. It automatically creates the PyTorch-specific inferencing wrapper code and specifies package dependencies for you. See [this doc](https://mlflow.org/docs/latest/models.htmlid34) for more information on deploying models on Azure ML using MLflow.In this example, we deploy the Docker image to Azure Container Instance: a serverless compute capable of running a single container. You can tag and add descriptions to help keep track of your web service. [Other inferencing compute choices](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) include Azure Kubernetes Service which provides scalable endpoint suitable for production use.Note that the service deployment can take several minutes. First define your deployment target and customize parameters in the deployment config. Refer to [this documentation](https://docs.microsoft.com/azure/machine-learning/reference-azure-machine-learning-cliazure-container-instance-deployment-configuration-schema) for more information.
###Code
import json
# Data to be written
deploy_config ={
"computeType": "aci"
}
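# (Added note, not from the original notebook) the ACI deployment-config schema
# also accepts container resource settings, for example:
# deploy_config = {"computeType": "aci",
#                  "containerResourceRequirements": {"cpu": 1, "memoryInGB": 2}}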
# Serializing json
json_object = json.dumps(deploy_config)
# Writing to sample.json
with open("deployment_config.json", "w") as outfile:
outfile.write(json_object)
from mlflow.deployments import get_deploy_client
# set the tracking uri as the deployment client
client = get_deploy_client(mlflow.get_tracking_uri())
# set the model path
model_path = "model"
# set the deployment config
deployment_config_path = "deployment_config.json"
test_config = {'deploy-config-file': deployment_config_path}
# define the model path and the name is the service name
# the model gets registered automatically and a name is autogenerated using the "name" parameter below
client.create_deployment(model_uri='runs:/{}/{}'.format(run.id, model_path),
config=test_config,
name="keras-aci-deployment")
###Output
_____no_output_____
###Markdown
Once the deployment has completed, you can check the scoring URI of the web service in the AzureML studio UI, in the Endpoints tab. Refer to [mlflow predict](https://mlflow.org/docs/latest/python_api/mlflow.deployments.htmlmlflow.deployments.BaseDeploymentClient.predict) for how to test your deployment.
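As a minimal added sketch (not part of the original notebook), the same deployment client can also score the endpoint directly; the one-row DataFrame below is a hypothetical stand-in for a flattened 28x28 MNIST image and assumes the deployed wrapper accepts this shape.
###Code
import pandas as pd
# Hypothetical input: a single all-zero 784-pixel image
sample_input = pd.DataFrame([[0.0] * 784])
# Score the ACI deployment created above through the MLflow deployment plugin
predictions = client.predict("keras-aci-deployment", sample_input)
print(predictions)
###Output
_____no_output_____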
###Code
client.delete("keras-aci-deployment")
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image ClassifierThis example shows you how to use MLflow together with Azure Machine Learning services for tracking the metrics and artifacts while training a PyTorch model to classify MNIST digit images and deploy the model as a web service. You'll learn how to: 1. Set up MLflow tracking URI so as to use Azure ML 2. Create experiment 3. Instrument your model with MLflow tracking 4. Train a PyTorch model locally 5. Train a model on GPU compute on Azure 6. View your experiment within your Azure ML Workspace in Azure Portal 7. Deploy the model as a web service on Azure Container Instance 8. Call the model to make predictions Pre-requisites If you are using a Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipnyb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met.Install PyTorch, this notebook has been tested with torch==1.4Also, install azureml-mlflow package using ```pip install azureml-mlflow```. Note that azureml-mlflow installs mlflow package itself as a dependency if you haven't done so previously. Set-upImport packages and check versions of Azure ML SDK and MLflow installed on your computer. Then connect to your Workspace.
###Code
import sys, os
import mlflow
import mlflow.azureml
import azureml.core
from azureml.core import Workspace
print("SDK version:", azureml.core.VERSION)
print("MLflow version:", mlflow.version.VERSION)
ws = Workspace.from_config()
ws.get_details()
###Output
_____no_output_____
###Markdown
Set tracking URISet the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
###Code
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
###Output
_____no_output_____
###Markdown
Create ExperimentIn both MLflow and Azure ML, training runs are grouped into experiments. Let's create one for our experimentation.
###Code
experiment_name = "pytorch-with-mlflow"
mlflow.set_experiment(experiment_name)
###Output
_____no_output_____
###Markdown
Train model locally while logging metrics and artifactsThe ```scripts/train.py``` program contains the code to load the image dataset, train and test the model. Within this program, the train.driver function wraps the end-to-end workflow.Within the driver, the ```mlflow.start_run``` starts MLflow tracking. Then, ```mlflow.log_metric``` functions are used to track the convergence of the neural network training iterations. Finally ```mlflow.pytorch.save_model``` is used to save the trained model in framework-aware manner.Let's add the program to search path, import it as a module and invoke the driver function. Note that the training can take few minutes.
###Code
lib_path = os.path.abspath("scripts")
sys.path.append(lib_path)
import train
run = train.driver()
###Output
_____no_output_____
###Markdown
Train model on GPU compute on AzureNext, let's run the same script on GPU-enabled compute for faster training. If you've completed the [Configuration](../../../configuration.ipynb) notebook, you should have a GPU cluster named "gpu-cluster" available in your workspace. Otherwise, follow the instructions in the notebook to create one. For simplicity, this example uses a single process on a single VM to train the model.Clone an environment object from the PyTorch 1.4 Azure ML curated environment. Azure ML curated environments are pre-configured environments to simplify ML setup, reference [this doc](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-environmentsuse-a-curated-environment) for more information. To enable MLflow tracking, add ```azureml-mlflow``` as a pip package.
###Code
from azureml.core import Environment
env = Environment.get(workspace=ws, name="AzureML-PyTorch-1.4-GPU").clone("mlflow-env")
env.python.conda_dependencies.add_pip_package("azureml-mlflow")
env.python.conda_dependencies.add_pip_package("Pillow==6.0.0")
###Output
_____no_output_____
###Markdown
Create a ScriptRunConfig to specify the training configuration: script, compute as well as environment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory="./scripts", script="train.py")
src.run_config.environment = env
src.run_config.target = "gpu-cluster"
###Output
_____no_output_____
###Markdown
Get a reference to the experiment you created previously, but this time, as an Azure Machine Learning experiment object.Then, use the ```Experiment.submit``` method to start the remote training run. Note that the first training run often takes longer as Azure Machine Learning service builds the Docker image for executing the script. Subsequent runs will be faster as the cached image is used.
###Code
from azureml.core import Experiment
exp = Experiment(ws, experiment_name)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
You can monitor the run and its metrics on Azure Portal.
###Code
run
###Output
_____no_output_____
###Markdown
Also, you can wait for run to complete.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Deploy model as web serviceThe ```mlflow.azureml.deploy``` function registers the logged PyTorch model and deploys the model in a framework-aware manner. It automatically creates the PyTorch-specific inferencing wrapper code and specifies package dependencies for you. See [this doc](https://mlflow.org/docs/latest/models.htmlid34) for more information on deploying models on Azure ML using MLflow.In this example, we deploy the Docker image to Azure Container Instance: a serverless compute capable of running a single container. You can tag and add descriptions to help keep track of your web service. [Other inferencing compute choices](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) include Azure Kubernetes Service which provides scalable endpoint suitable for production use.Note that the service deployment can take several minutes.
###Code
from azureml.core.webservice import AciWebservice, Webservice
model_path = "model"
aci_config = AciWebservice.deploy_configuration(cpu_cores=2,
memory_gb=5,
tags={"data": "MNIST", "method" : "pytorch"},
description="Predict using webservice")
webservice, azure_model = mlflow.azureml.deploy(model_uri='runs:/{}/{}'.format(run.id, model_path),
workspace=ws,
deployment_config=aci_config,
service_name="pytorch-mnist-1",
model_name="pytorch_mnist")
###Output
_____no_output_____
###Markdown
Once the deployment has completed you can check the scoring URI of the web service.
###Code
print("Scoring URI is: {}".format(webservice.scoring_uri))
###Output
_____no_output_____
###Markdown
In case of a service creation issue, you can use ```webservice.get_logs()``` to get logs to debug. Make predictions using a web serviceTo make the web service, create a test data set as normalized PyTorch tensors. Then, let's define a utility function that takes a random image and converts it into a format and shape suitable for input to the PyTorch inferencing end-point. The conversion is done by: 1. Select a random (image, label) tuple 2. Take the image and converting the tensor to NumPy array 3. Reshape array into 1 x 1 x N array * 1 image in batch, 1 color channel, N = 784 pixels for MNIST images * Note also ```x = x.view(-1, 1, 28, 28)``` in net definition in ```train.py``` program to shape incoming scoring requests. 4. Convert the NumPy array to list to make it into a built-in type. 5. Create a dictionary {"data", <list>} that can be converted to JSON string for web service requests.
###Code
from torchvision import datasets, transforms
import random
import numpy as np
# Use Azure Open Datasets for MNIST dataset
datasets.MNIST.resources = [
("https://azureopendatastorage.azurefd.net/mnist/train-images-idx3-ubyte.gz",
"f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("https://azureopendatastorage.azurefd.net/mnist/train-labels-idx1-ubyte.gz",
"d53e105ee54ea40749a09fcbcd1e9432"),
("https://azureopendatastorage.azurefd.net/mnist/t10k-images-idx3-ubyte.gz",
"9fb629c4189551a2d022fa330f9573f3"),
("https://azureopendatastorage.azurefd.net/mnist/t10k-labels-idx1-ubyte.gz",
"ec29112dd5afa0611ce80d1b7f02629c")
]
test_data = datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
def get_random_image():
    image_idx = random.randint(0, len(test_data) - 1)  # randint is inclusive at both ends
image_as_tensor = test_data[image_idx][0]
return {"data": elem for elem in image_as_tensor.numpy().reshape(1,1,-1).tolist()}
###Output
_____no_output_____
###Markdown
Then, invoke the web service using a random test image. Convert the dictionary containing the image to JSON string before passing it to web service.The response contains the raw scores for each label, with greater value indicating higher probability. Sort the labels and select the one with greatest score to get the prediction. Let's also plot the image sent to web service for comparison purposes.
###Code
%matplotlib inline
import json
import matplotlib.pyplot as plt
test_image = get_random_image()
response = webservice.run(json.dumps(test_image))
response = sorted(response[0].items(), key = lambda x: x[1], reverse = True)
print("Predicted label:", response[0][0])
plt.imshow(np.array(test_image["data"]).reshape(28,28), cmap = "gray")
###Output
_____no_output_____
###Markdown
You can also call the web service using a raw POST method against the web service
###Code
import requests
response = requests.post(url=webservice.scoring_uri, data=json.dumps(test_image),headers={"Content-type": "application/json"})
print(response.text)
###Output
_____no_output_____
###Markdown
Clean upYou can delete the ACI deployment with a delete API call.
###Code
webservice.delete()
###Output
_____no_output_____
###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License.  Use MLflow with Azure Machine Learning to Train and Deploy PyTorch Image ClassifierThis example shows you how to use MLflow together with Azure Machine Learning services for tracking the metrics and artifacts while training a PyTorch model to classify MNIST digit images and deploy the model as a web service. You'll learn how to: 1. Set up MLflow tracking URI so as to use Azure ML 2. Create experiment 3. Instrument your model with MLflow tracking 4. Train a PyTorch model locally 5. Train a model on GPU compute on Azure 6. View your experiment within your Azure ML Workspace in Azure Portal 7. Deploy the model as a web service on Azure Container Instance 8. Call the model to make predictions Pre-requisites If you are using a Notebook VM, you are all set. Otherwise, go through the [Configuration](../../../../configuration.ipnyb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met.Install PyTorch, this notebook has been tested with torch==1.4Also, install azureml-mlflow package using ```pip install azureml-mlflow```. Note that azureml-mlflow installs mlflow package itself as a dependency if you haven't done so previously. Set-upImport packages and check versions of Azure ML SDK and MLflow installed on your computer. Then connect to your Workspace.
###Code
import sys, os
import mlflow
import mlflow.azureml
import azureml.core
from azureml.core import Workspace
print("SDK version:", azureml.core.VERSION)
print("MLflow version:", mlflow.version.VERSION)
ws = Workspace.from_config()
ws.get_details()
###Output
_____no_output_____
###Markdown
Set tracking URISet the MLflow tracking URI to point to your Azure ML Workspace. The subsequent logging calls from MLflow APIs will go to Azure ML services and will be tracked under your Workspace.
###Code
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
###Output
_____no_output_____
###Markdown
Create ExperimentIn both MLflow and Azure ML, training runs are grouped into experiments. Let's create one for our experimentation.
###Code
experiment_name = "pytorch-with-mlflow"
mlflow.set_experiment(experiment_name)
###Output
_____no_output_____
###Markdown
Train model locally while logging metrics and artifactsThe ```scripts/train.py``` program contains the code to load the image dataset, train and test the model. Within this program, the train.driver function wraps the end-to-end workflow.Within the driver, the ```mlflow.start_run``` starts MLflow tracking. Then, ```mlflow.log_metric``` functions are used to track the convergence of the neural network training iterations. Finally ```mlflow.pytorch.save_model``` is used to save the trained model in framework-aware manner.Let's add the program to search path, import it as a module and invoke the driver function. Note that the training can take few minutes.
###Code
lib_path = os.path.abspath("scripts")
sys.path.append(lib_path)
import train
run = train.driver()
###Output
_____no_output_____
###Markdown
Train model on GPU compute on AzureNext, let's run the same script on GPU-enabled compute for faster training. If you've completed the [Configuration](../../../configuration.ipynb) notebook, you should have a GPU cluster named "gpu-cluster" available in your workspace. Otherwise, follow the instructions in the notebook to create one. For simplicity, this example uses a single process on a single VM to train the model.Clone an environment object from the PyTorch 1.4 Azure ML curated environment. Azure ML curated environments are pre-configured environments to simplify ML setup, reference [this doc](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-environmentsuse-a-curated-environment) for more information. To enable MLflow tracking, add ```azureml-mlflow``` as a pip package.
###Code
from azureml.core import Environment
env = Environment.get(workspace=ws, name="AzureML-PyTorch-1.4-GPU").clone("mlflow-env")
env.python.conda_dependencies.add_pip_package("azureml-mlflow")
env.python.conda_dependencies.add_pip_package("Pillow==6.0.0")
###Output
_____no_output_____
###Markdown
Create a ScriptRunConfig to specify the training configuration: script, compute as well as environment.
###Code
from azureml.core import ScriptRunConfig
src = ScriptRunConfig(source_directory="./scripts", script="train.py")
src.run_config.environment = env
src.run_config.target = "gpu-cluster"
###Output
_____no_output_____
###Markdown
Get a reference to the experiment you created previously, but this time, as an Azure Machine Learning experiment object.Then, use the ```Experiment.submit``` method to start the remote training run. Note that the first training run often takes longer as Azure Machine Learning service builds the Docker image for executing the script. Subsequent runs will be faster as the cached image is used.
###Code
from azureml.core import Experiment
exp = Experiment(ws, experiment_name)
run = exp.submit(src)
###Output
_____no_output_____
###Markdown
You can monitor the run and its metrics on Azure Portal.
###Code
run
###Output
_____no_output_____
###Markdown
Also, you can wait for run to complete.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Deploy model as web serviceThe ```mlflow.azureml.deploy``` function registers the logged PyTorch model and deploys the model in a framework-aware manner. It automatically creates the PyTorch-specific inferencing wrapper code and specifies package dependencies for you. See [this doc](https://mlflow.org/docs/latest/models.htmlid34) for more information on deploying models on Azure ML using MLflow.In this example, we deploy the Docker image to Azure Container Instance: a serverless compute capable of running a single container. You can tag and add descriptions to help keep track of your web service. [Other inferencing compute choices](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where) include Azure Kubernetes Service which provides scalable endpoint suitable for production use.Note that the service deployment can take several minutes.
###Code
from azureml.core.webservice import AciWebservice, Webservice
model_path = "model"
aci_config = AciWebservice.deploy_configuration(cpu_cores=2,
memory_gb=5,
tags={"data": "MNIST", "method" : "pytorch"},
description="Predict using webservice")
webservice, azure_model = mlflow.azureml.deploy(model_uri='runs:/{}/{}'.format(run.id, model_path),
workspace=ws,
deployment_config=aci_config,
service_name="pytorch-mnist-1",
model_name="pytorch_mnist")
###Output
_____no_output_____
###Markdown
Once the deployment has completed you can check the scoring URI of the web service.
###Code
print("Scoring URI is: {}".format(webservice.scoring_uri))
###Output
_____no_output_____
###Markdown
In case of a service creation issue, you can use ```webservice.get_logs()``` to get logs to debug. Make predictions using a web serviceTo make the web service, create a test data set as normalized PyTorch tensors. Then, let's define a utility function that takes a random image and converts it into a format and shape suitable for input to the PyTorch inferencing end-point. The conversion is done by: 1. Select a random (image, label) tuple 2. Take the image and converting the tensor to NumPy array 3. Reshape array into 1 x 1 x N array * 1 image in batch, 1 color channel, N = 784 pixels for MNIST images * Note also ```x = x.view(-1, 1, 28, 28)``` in net definition in ```train.py``` program to shape incoming scoring requests. 4. Convert the NumPy array to list to make it into a built-in type. 5. Create a dictionary {"data", <list>} that can be converted to JSON string for web service requests.
###Code
from torchvision import datasets, transforms
import random
import numpy as np
test_data = datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
def get_random_image():
    image_idx = random.randint(0, len(test_data) - 1)  # randint is inclusive at both ends
image_as_tensor = test_data[image_idx][0]
return {"data": elem for elem in image_as_tensor.numpy().reshape(1,1,-1).tolist()}
###Output
_____no_output_____
###Markdown
Then, invoke the web service using a random test image. Convert the dictionary containing the image to JSON string before passing it to web service.The response contains the raw scores for each label, with greater value indicating higher probability. Sort the labels and select the one with greatest score to get the prediction. Let's also plot the image sent to web service for comparison purposes.
###Code
%matplotlib inline
import json
import matplotlib.pyplot as plt
test_image = get_random_image()
response = webservice.run(json.dumps(test_image))
response = sorted(response[0].items(), key = lambda x: x[1], reverse = True)
print("Predicted label:", response[0][0])
plt.imshow(np.array(test_image["data"]).reshape(28,28), cmap = "gray")
###Output
_____no_output_____
###Markdown
You can also call the web service using a raw POST method against the web service
###Code
import requests
response = requests.post(url=webservice.scoring_uri, data=json.dumps(test_image),headers={"Content-type": "application/json"})
print(response.text)
###Output
_____no_output_____
###Markdown
Clean upYou can delete the ACI deployment with a delete API call.
###Code
webservice.delete()
###Output
_____no_output_____ |
IIMB-Assignments/Assgn-4/ref/.ipynb_checkpoints/Module3_Assignment2_Sayantan_Raha-v3-checkpoint.ipynb | ###Markdown
SAYANTAN RAHA Roll : BAI09056 IIMB - BAI09 - Assignment 2
###Code
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Toggle on/off Code"></form>''')
import scipy.stats as stats
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
###Output
_____no_output_____
###Markdown
Q 1.1We will use the following formula to calculate the coefficient of CRIM.\begin{equation*} \beta = r * \frac{SD_Y} {SD_X}\end{equation*}\begin{equation*}\text {where r = Correlation of X (CRIM) and Y (PRICE) &} \end{equation*}\begin{equation*}SD_x \text{= Standard deviation of X}\end{equation*}\begin{equation*}SD_y \text{= Standard deviation of Y}\end{equation*}From table 1.1 we can find SDx = 8.60154511 & SDy = 9.197From table 1.2 we can find r = -.388Using the above we can find:
###Code
sd_crim = 8.60154511
sd_price = 9.197
r = -.388
B1 = r * sd_price / sd_crim
print("B1 {}, implies as crime rate increases by 1 unit, unit price reduces by {} units (Lac INR)".format(B1, abs(B1)))
###Output
B1 -0.41485988323788486, implies as crime rate increases by 1 unit, unit price reduces by 0.41485988323788486 units (Lac INR)
###Markdown
Q 1.2The range of coefficients is given by:\begin{equation*} \beta \pm \text{t-crit *} SE_{beta}\end{equation*}where t-critical is the critical value of T for significance alpha.Interpretation: \begin{equation*} \beta =\text {Increase in Y as X changes by 1 Unit} \end{equation*}
###Code
n = 506
seb1 = 0.044
tcrit = abs(stats.t.ppf(0.025, df = 505))
print("T-critical at alpha {} and df {} is {}".format(0.05, 505, tcrit))
print("Min B1 {}".format(B1 + tcrit * seb1))
print("Max B1 {}".format(B1 - tcrit * seb1))
print("Price will reduce between 32K to 50K with 95% CI, hence his assumption that it reduces by at least 30K is correct")
###Output
T-critical at alpha 0.05 and df 505 is 1.964672638739595
Min B1 -0.3284142871333427
Max B1 -0.5013054793424271
Price will reduce between 32K to 50K with 95% CI, hence his assumption that it reduces by at least 30K is correct
###Markdown
Q 1.3Regression is valid for only the observed ranges of X (Predcitor). The min value of Crime rate = .0068 > 0. Hence it is incorrect to draw any conclusion about the predicted values of Y for Crim==0 as that value is unobserved.We cannot claim the value will be 24.03 Q 1.4Here Y predicted can be calculated from the regression equation:24.033 - 0.414 * 1 (Value of CRIM)For large values of n the range of Y-predicted is given by:\begin{equation*} \hat Y \pm \text{t-crit *} SE_{Y}\end{equation*}where t-critical is the critical value of T for significance alpha (0.05).
###Code
se = 8.484 #seb1 * sd_crim * (n - 1) ** 0.5
#print(se)
yhat = 24.033 - 0.414 * 1
yhat_max = (yhat + tcrit * se)
print("Max Value of Price for CRIM ==1 is {}".format(yhat_max))
###Output
Max Value of Price for CRIM ==1 is 40.28728266706672
###Markdown
Q 1.5Here Y predicted (mean value of regression) can be calculated from the regression equation: 22.094 + 6.346 * 1 (Value of SEZ).t-critical is computed as:\begin{equation*} t = \frac {(Y_0 - \hat{Y})} {SE_{estimate}} \end{equation*}We can calculate the probability using the CDF of a normal distribution. Since we want the price to be at least 40 lac, we consider the right tail of the t-value to compute the probability.
###Code
yhat = 22.094 + 6.346
print("Mean Regression value {}".format(yhat))
t = (40 - yhat) / 9.064
print("t-crit at alpha 0.05 is {}".format(t))
print("Y-pred follows a normal distribution. Probability of Price being at least 40 lac is {} percent".format(round((1 - stats.norm.cdf(t))* 100, 2)))
###Output
Mean Regression value 28.44
t-crit at alpha 0.05 is 1.2753751103265665
Y-pred follows a normal distribution. Probability of Price being at least 40 lac is 10.11 percent
###Markdown
Q 1.6 - aFrom the residual plot, by visual inspection we can see that the spread of standardised errors is higher for lower values of the standardised prediction compared to higher values.Hence the variance of the residuals is not equal and the plot demonstrates heteroscedasticity Q 1.6 - b1. It is a right skewed distribution2. The left tail has less proportion of data than that of a normal distribution3. Between 40-80 % range the distribution has much less proportion of data compared to a normal distributionFrom observing the P-P plot we conclude there is considerable difference between this distribution and a normal distribution. Q 1.6 - cBased on the above we can conclude that this regression equation may not be functionally correct. It may not be correct to rely on predictions using this model. Q 1.7The increase in R-squared when a new variable is added to a model is given by the **Square of the Semi-Partial (PART) Correlation**.- From Table 1.7: R-squared @ Step 2 = 0.542- From Table 1.8: PART Correlation for adding RES = -.153
###Code
print("R-squared in Step 3 is {}".format(0.542 + (-.153) ** 2))
###Output
R-squared in Step 3 is 0.565409
###Markdown
Q 1.8It reduces as there is correlation between RM and CRIM. Part of what was explained by RM in model 1 is now being explained by CRIM in model 2, as CRIM and RM are correlated. Technically this is called Omitted Variable Bias. The reduction can be explained by the following equation:\begin{equation*} \alpha_{RM_{Model1}} = \beta_{RM_{Model2}} + \frac{\beta_{CRIM_{Model2}} * Cov(RM, CRIM)} {Var(RM)} \end{equation*}From the correlation table we see that RM and CRIM have a negative correlation (and hence a negative covariance), so the overall value of the coefficient for RM reduces
###Code
# Import the library
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
# Make the diagram
v = venn3(subsets = (1, 1, 1, 1, 1, 1, 1), set_labels= ('PRICE', 'RM', 'CRIM'))
v.get_label_by_id('101').set_text('Y_CRIM')
v.get_label_by_id('110').set_text('Y_RM')
v.get_label_by_id('111').set_text('Y_RM_CRIM')
v.get_label_by_id('011').set_text('RM_CRIM')
plt.show()
###Output
_____no_output_____
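###Markdown
The cell below is an added numeric sketch of the omitted-variable-bias identity above; the covariance and variance figures are hypothetical and chosen only to mirror the signs reported in the correlation table.
###Code
# alpha_RM(model 1) = beta_RM(model 2) + beta_CRIM(model 2) * Cov(RM, CRIM) / Var(RM)
beta_rm_full = 7.0      # assumed RM coefficient when CRIM is included
beta_crim_full = -0.2   # assumed (negative) CRIM coefficient
cov_rm_crim = -2.0      # RM and CRIM move in opposite directions, so Cov < 0
var_rm = 0.5            # assumed variance of RM
alpha_rm_simple = beta_rm_full + beta_crim_full * cov_rm_crim / var_rm
# negative * negative adds a positive term, so the RM coefficient is larger
# when CRIM is omitted (model 1) than when it is included (model 2)
print(alpha_rm_simple)
###Output
_____no_output_____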
###Markdown
Q 1.9We will use the model in step - 6 for answering this question. - Since the variables are not standardised we cannot use the magnitude of the coefficients as a measure of impact on dependent variable (Price)- We will use the notion of the Standardised Coefficients to measure how much 1 SD change in the variable X (Predictor) changes Y (dependant)- From Tables 1.1 and 1.8 we can easily obtain the Standardised Coefficients for the regression model for all variables except for RM as the SD of RM is not provided in table 1.1 and the Standardised coefficient of RM is not provided in table 1.8. Standardised Coefficient is calculated using: \begin{equation*} \beta_{STANDARDISED} = \hat\beta * \frac {S_X} {S_Y} \end{equation*}where \begin{equation*} \text{Standard Deviation X} = S_X \end{equation*}& \begin{equation*} \text{Standard Deviation Y} = S_Y \end{equation*}- To calculate the variance of RM we will use the Model 1 - In Model 1 the coefficient of RM is 9.102- Standardized Coefficient of RM = .695, SD of PRICE (Y) = 9.197- Using these values and rearranging the equation discussed above, we get SD of RM = .7022- From the below table we can see that **RM** has the highest impact on PRICE.
###Code
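# (Added sketch) back out the SD of RM from Model 1 by rearranging
# standardized beta = beta * SD_X / SD_Y, using the figures quoted above
sd_rm = 0.695 * 9.197 / 9.102  # ~0.7022, the value used for RM below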
data = pd.DataFrame({"_": ["INTERCEPT","RM","CRIM","RES","SEZ","Highway", "AGE"]})
data["Coefficients"] = [-8.993, 7.182, -.194, -.318, 4.499, -1.154, -.077]
data["Standardized Coefficients"] = ['', (7.182 * .7022) / 9.197, -.194 * 8.60154511 / 9.197,
-.238, .124, .264,
-.077 * 28.1489 / 9.197]
data
###Output
_____no_output_____
###Markdown
Q 2.1Correct: ***1. The model explains 42.25% of variation in box office collection.******2. There are outliers in the model.******3. The residuals do not follow a normal distribution.***Incorrect:4.The model cannot be used since R-square is low.5.Box office collection increases as the budget increases. Q 2.2Here Budget (X) can never be = 0, as it may not be possible to produce a movie without money and X = 0 is unobserved i.e. X = 0 falls outside the domain of the observed values of the variable X. The relationship between the variables can change as we move outside the observed region. The Model explains the relationship between Y and X within the range of observed values only. We cannot predict for a point that is outside the range of observed values using the regression model. Hence Mr Chellapa's observation is incorrect Q 2.3Since the variable is insignificant at alpha = 0.05, hence the coefficient may not be different from zero. There is is no statistical validity that the collection of movie released in Releasing_Time Normal_Season is different from Releasing_Time Holiday_Season (which is factored in the intercept / constant).Since we do not have the data hence we cannot rerun the model without the insignificant variable. We will assume that the co-efficient is 0 and it's removal does not have any effect on the overall equation (other significant variables).Hence the difference is **Zero**.
###Code
y = 2.685 + .147
#print("With beta = .147 y = {}".format(y))
#print("With beta = 0 y = {}".format(2.685))
###Output
_____no_output_____
###Markdown
Q 2.4The beta for Release Normal Time is being considered as 0 as it is statistically insignificant at alpha. Hence it will be factored in the Intercept term. Releasing_Time Long_Weekend is statistically significant and the coefficient = 1.247. The range of values will be considered because of variability of the coefficient.SE = 0.588, tCrit @ 0.05 = 1.964; Max Value = Constant + tcrit * SE; Min Value = Constant - tcrit * SE
###Code
Bmax = np.exp(2.685 + 1.247 + 1.964 *.588)# - np.exp(2.685)
print("Max earning from Long weekend movie releases can be {}".format(Bmax))
Bmin = np.exp(2.685+1.247 - 1.964 *.588)
print("Min earning from Long weekend movie releases can be {}".format(Bmin))
print("Movies released in normal Weekends may earn on Average {}".format(np.exp(2.685)))
#print("Movies released in normal Weekends may earn on Average {}".format(np.exp(2.685 + .147)))
print("Movies released in Long Weekends may or may not earn at least 5 Cr more than movies released in normal season as the min difference is around 2 Cr")
print("Mr. Chellapa's statement is incorrect.")
###Output
Max earning from Long weekend movie releases can be 161.87622500117592
Min earning from Long weekend movie releases can be 16.073436458805958
Movies released in normal Weekends may earn on Average 14.658201380262703
Movies released in Long Weekends may or may not earn at least 5 Cr more than movies released in normal season as the min difference is around 2 Cr
Mr. Chellapa's statement is incorrect.
###Markdown
Q 2.5The increase in R-squared when a new variable is added to a model is the given by the **Square of the Semi-Partial (PART) Correlation**.The assumption here is the variable "Director_CAT C" was the last variable added to model at Step 6. We have to make this assumption as variables added in prior stages are not available.- From Table 2.5 : R-squared @ Step 5 = 0.810 ** 2 = .6561- From Table 2.6: PART Correlation for adding Director_CAT C = -.104
###Code
print("R-squared in Step 3 is {}".format(0.6561 + (-.104) ** 2))
###Output
R-squared in Step 3 is 0.6669160000000001
###Markdown
Q2.6- Budget_35_Cr has the highest impact on the performance of the movie. On average a movie with budget exceeding 35 Cr adds 1.53 Cr more than a movie with a lesser budget.- Recommendation: Use a high enough budget to: - Hire Category A Production House - Do not hire Category C Director - Do not hire Category C Music Director - Produce a Comedy movie Q 2.7- We cannot say that the variables have no relationship to Y (BOX Office Collection)- We can conclude that in presence of the other variables the variables in Model 2 are not explaining additional information about Y
###Code
# Make the diagram
v = venn3(subsets = (1, 1, 1, 1, 1, 1, 1), set_labels= ('Y', 'A', 'B'))
v.get_label_by_id('101').set_text('Y_B')
v.get_label_by_id('110').set_text('Y_A')
v.get_label_by_id('111').set_text('Y_A_B')
v.get_label_by_id('011').set_text('A_B')
plt.show()
###Output
_____no_output_____
###Markdown
From chart above we can see that as we add new variables (A, B) it explains variations in Y. The explained variation in Y due to addition of a new variable should be significant enough. This is measured by:1. t-test for individual variable2. Partial F-test for the models generated consecutivelyWe may conclude that the variables of Model 2 may not be explaining significant variations in Y in presence of the additional variables added later on and hence was dropped. Q 2.8We are making the assumption that the variable Youtube views imply views of the actual movie and not the trailers before movie release dates. The following explanation will not be valid in that case. Also, we are assuming that revenue collected from advertisements during Youtube views do not fall under the Box Office Collection.Youtube_Views = Will not contribute anything meaningful functionally to the Box Office collection as the movie has been created and released in theaters and all possible collection is completed. The main essence of the prediction here is to understand before making a movie, what all factors may lead to better revenue collection for a movie Q 3.1 Table 3.1- **Observations** (N) = 543- **Standard Error** - \begin{equation*} SE = \sqrt {\frac{ \sum_{k=1}^N {(Y_k - \hat{Y_k})^2}} {N - 2}} \end{equation*} \begin{equation*} (Y_k - \hat{Y_k})^2 = \epsilon_k^2 = \text{Residual SS (SSE)} = \text{17104.06 (Table 3.2)}\end{equation*}- **R-Squared** = 1 - SSE / SST - SSE = 17104.06 (Table 3.2) - SST = 36481.89 (Table 3.2)- **Adjuated R-Squared** = 1 - (SSE / N-k-1) / (SST/N-1) - N = 543 - K = 3- **Multiple R** = \begin{equation*} \sqrt R_{Squared}\end{equation*}
###Code
x = ["Multiple R", "R Square", "Adjusted R Squared", "Standard Error", "Observations"]
data = pd.DataFrame({"Regression Statistics": x})
data["_"] = [(1 - 17104.06/36481.89) ** 0.5,1 - 17104.06/36481.89, 1 - (17104.06/(543 - 3 -1))/(36481.89/542),((17104.06)/541) ** 0.5,543]
data
###Output
_____no_output_____
###Markdown
Table 3.2- **DF Calculation** - DF for Regression (K) = Number of variables = 3 - DF for Residual = N - K - 1 = 539- **SS Calculation** - Residual SS (SSE) = 17104.06 (given) - Total SS (TSS)= 36481.89 (given) - Regression SS (SSR) = TSS - SSE = 19377.83- **MS Calculation** - MSR (Regression) = SSR / DF for SSR (=3) - MSE (Error) = SSE / DF for SSE (= 539)- **F Claculation** - F = MSR / MSE
###Code
x = ["Regression", "Residual", "Total"]
ss = [36481.89 - 17104.06, 17104.06,36481.89]
df = [3, 539,542]
ms = [19377.83 / 3, 17104 / 539, '']
f = [(19377.83 / 3) / (17104 / 539),'','']
sf = [1 - stats.f.cdf(305, 3, 539),'','']
data = pd.DataFrame({"_": x})
data["DF"] = df
data["SS"] = ss
data["MS"] = ms
data["F"] = f
data["SignificanceF"] = sf
data
###Output
_____no_output_____
###Markdown
Table 3.3 - Coefficients- MLR T-Test - \begin{equation*} t_i = \frac {\beta_i - 0} {Se(\beta_i)}\end{equation*} where i denotes the different variables (here i = 3)
###Code
data = pd.DataFrame({"_":["Intercept", "Margin", "Gender", "College"]})
data["Coefficeints"] = [38.59235, 5.32e-05, 1.551306, -1.47506]
data["Standard Error"] = [0.937225, 2.18e-06, 0.777806, 0.586995]
data["t Stat"] = [(38.59235 / 0.937225),5.32e-05 / 2.18e-06, 1.551306/0.777806, -1.47506/ 0.586995]
data["P-Value"] = ['','','','']
data["Lower 95%"] = [36.75129, 4.89E-05, 0.023404, -2.62814]
data["Upper 95%"] = [40.4334106,5.7463E-05,3.07920835,-0.3219783]
data
###Output
_____no_output_____
###Markdown
Q 3.2From the table above we see that for all the variables the t-value > 1.964. hence all the variables are significant. 1.964 = Critical value of t @ significance 0.05 Q 3.3F-distribution with DF = 3, 539 at significance = 95% is 2.621. Hence the model is significant.
###Code
1 - stats.f.cdf(2.621, 3, 539)
stats.f.ppf(0.95, 3, 539)
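# (Added check for Q 3.2) two-sided t-critical at alpha = 0.05 with df = 539;
# this is the 1.964 cut-off the t-statistics in Table 3.3 are compared against
stats.t.ppf(0.975, 539)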
###Output
_____no_output_____
###Markdown
Q 3.4The increase in R-squared when a new variable is added to a model is the given by the **Square of the Semi-Partial (PART) Correlation**.- R-squared for Model 2 = 0.52567 (R1)- R-squared for Model 3 = 0.531163 (R2)Part Correlation of College & % Votes = \begin{equation*}\sqrt{R_2 - R_1} \end{equation*}
###Code
print("Increase in R-Squared due to adding College = {}".format(0.531163 - 0.52567))
print("Part Correlation of College & % Votes = {}".format((0.531163 - 0.52567)**0.5))
###Output
Increase in R-Squared due to adding College = 0.005493000000000081
Part Correlation of College & % Votes = 0.07411477585475167
###Markdown
Q 3.5We will conduct Partial F-test between models to test for significance of each model. We make the assumption that the variables added are significant at each step (model) at alpha 0.05\begin{equation*}F_{PARTIAL} = \frac{\frac{R_{FULL}^2 - R_{PARTIAL}^2} {k - r}} {\frac{1 - R_{FULL}^2} {N - k - 1}}\end{equation*}where k = variables in full model, r = variables in reduced model, N = Total number of records
###Code
def f_partial(rf, rp, n, k, r):
return ((rf **2 - rp ** 2)/(k-r))/((1 - rf ** 2)/ (n - k - 1))
print("Model 3 Partial F {}".format(f_partial(0.531163, 0.52567, 543, 3, 2)))
print("Model 3 Critical F at Df = (1, 539) {}".format(1 - stats.f.cdf(4.36, 1, 539)))
print("Model 4 Partial F {}".format(f_partial(0.56051, 0.531163, 543, 4, 3)))
print("Model 4 Critical F at Df = (1, 539) {}".format(1 - stats.f.cdf(25.13, 1, 539)))
print("Model 5 Partial F {}".format(f_partial(0.581339, 0.56051, 543, 5, 4)))
print("Model 5 Critical F at Df = (1, 539) {}".format(1 - stats.f.cdf(19.29, 1, 539)))
print("\nHence we can see that all the models are significant. The number of features (5) are not very high, hence we conclude it's justified to add the additional variables")
###Output
Model 3 Partial F 4.358744633992214
Model 3 Critical F at Df = (1, 539) 0.03726112108923041
Model 4 Partial F 25.131765753275783
Model 4 Critical F at Df = (1, 539) 7.281767351319246e-07
Model 5 Partial F 19.291406535763336
Model 5 Critical F at Df = (1, 539) 1.3522586193692732e-05
Hence we can see that all the models are significant. The number of features (5) are not very high, hence we conclude it's justified to add the additional variables
###Markdown
Q 3.6- Since the variables are not standardised we cannot use the magnitude of the coefficients as a measure of impact on dependent variable (Vote %)- We will use the notion of the Standardised Coefficients to measure how much 1 SD change in the variable X (Predictor) changes Y (dependant)- Using Table 3.5 and equations below we will compute Standardised Coefficient: \begin{equation*} \beta_{STANDARDISED} = \hat\beta * \frac {S_X} {S_Y} \end{equation*}where \begin{equation*} \text{Standard Deviation X} = S_X \end{equation*}& \begin{equation*} \text{Standard Deviation Y} = S_Y \end{equation*}- From the below table we can see that **MARGIN** has the highest impact on Vote %. 1 SD change in Margin changes .75 SD in Vote %
###Code
data = pd.DataFrame({"_": ["INTERCEPT","MARGIN","Gender","College","UP","AP"]})
data["Coefficients"] = [38.56993, 5.58E-05, 1.498308, -1.53774, -3.71439, 5.715821]
data["Standard deviation"] = ['', 111365.7, 0.311494, 0.412796, 0.354761, 0.209766]
data["Standardized Coefficients"] = ['', 5.58E-05 * 111365.7 / 8.204253, 1.498308 * 0.311494 / 8.204253,
-1.53774 * 0.412796 / 8.204253, -3.71439 * 0.354761 / 8.204253,
5.715821 * 0.209766 / 8.204253]
data
###Output
_____no_output_____
###Markdown
Q 4.1
###Code
positives = 353+692
negatives = 751+204
N = positives + negatives
print("Total Positives: {} :: Total Negatives: {} :: Total Records: {}".format(positives, negatives, N))
pi1 = positives / N
pi2 = negatives / N
print("P(Y=1) = positives / N = {} :: P(Y=0) = negatives /N = {}".format(pi1, pi2))
_2LL0 = -2* (negatives * np.log(pi2) + positives * np.log(pi1))
print("-2LL0 = {}".format(_2LL0))
###Output
Total Positives: 1045 :: Total Negatives: 955 :: Total Records: 2000
P(Y=1) = positives / N = 0.5225 :: P(Y=0) = negatives /N = 0.4775
-2LL0 = 2768.5373542564103
###Markdown
- -2LLo is called the "Null Deviance" of a model. It is -2 Log Likelihood of a model which had no predictor variables. Hence we obtain the probabilities of positive and negative in the dataset using the frequencies for such model.- After adding "Premium" 2LL reduces to 2629.318 (Table 4.2). Hence reduction is equal to (-2LLo -(-2LLm)):
###Code
print(2768.537 - 2629.318)
###Output
139.2189999999996
###Markdown
Q 4.2
###Code
print("True Positive :Actually Positive and Predicted Positive = {}".format(692))
print("False Positive :Actually Negative and Predicted Positive = {}".format(204))
print("Precision = True Positive / (True Positive + False Positive) = {}".format(692.0 / (692 + 204)))
###Output
True Positive :Actually Positive and Predicted Positive = 692
False Positive :Actually Negative and Predicted Positive = 204
Precision = True Positive / (True Positive + False Positive) = 0.7723214285714286
###Markdown
Q 4.3exp(B) = change in odds ratio. The odds ratio can be interpreted as the multiplicative adjustment to the odds of the outcome, given a **unit** change in the independent variable. In this case the unit of measurement for Premium (1 INR) which is very small compared to the actual Premium (1000s INR), hence a unit change does not lead to a meaningful change in odds ratio, subsequently the odds ratio will be very close to one. Q 4.4Assumptions: Actual Data was not available. Decision would be made based on outcome of Model results
###Code
print("The model predicts 751 + 353 = {} customers have a probability less than 0.5 of paying premium".format(
751+353))
print("They will call 1104 customers through Call Center")
###Output
The model predicts 751 + 353 = 1104 customers have a probability less than 0.5 of paying premium
They will call 1104 customers through Call Center
###Markdown
Q 4.5 Total points we are getting is 1960.total = tp + fp + fn + tn**Formula** :sensitivity = tp/ (tp + fn)specificity = tn / (tn + fp)recall = sensitivityprecision = tp / (tp + fp)f-score = 2 \* precision * recall / (precision + recall)
###Code
tp = 60.0
fp = 20.0
fn = 51*20
tn = 43 * 20
total = tp + fp + fn + tn
print("Number of records ::".format(total))
sensitivity = tp/ (tp + fn)
specificity = tn / (tn + fp)
recall = sensitivity
precision = tp / (tp + fp)
fsc = 2 * precision * recall / (precision + recall)
print("Precision {} :: \nRecall {} :: \nsensitivity {} :: \nspecificity {} :: \nf-score {}".format(precision, recall, sensitivity, specificity, fsc))
###Output
Number of records ::
Precision 0.75 ::
Recall 0.05555555555555555 ::
sensitivity 0.05555555555555555 ::
specificity 0.9772727272727273 ::
f-score 0.10344827586206895
###Markdown
Q 4.6Probability of Y==1 can be calculated using the following formula:\begin{equation*} P(Y=1) = \frac{\exp^z} {1 + \exp^z}\end{equation*}\begin{equation*} \text{where z} = \beta_0 + \beta_1 * Salaried + \beta_2 * HouseWife +\beta_3 * others\end{equation*}However in this case the variable Housewife is not a significant variable. Hence using this equation to calculate probability for the variable house wife may not be appropriate. We will procced to compute the probability using the equation but will consider the coefficient of Housewife as 0 (B is not significantly different from 0 for insignificant variables). Ideally we need to rerun the Model removing the insignificant variable, but since we do not have the data we will use the same equation and assume the coefficients for the other variables will not change if we had removed Housewife.
###Code
#print("Probability of House wife paying the Premium is (beta ==22.061): {}".format(np.exp(-.858 + 22.061)
# / (1 + np.exp(-.858 + 22.061))))
print("Probability of House wife paying the Premium is (beta = 0): {}".format(np.exp(-.858 + 0)
/ (1 + np.exp(-.858 + 0))))
print("Since Beta is insignificant B == 0, hence .298 is the probability for housewife paying renewal")
###Output
Probability of House wife paying the Premium is (beta = 0): 0.29775737226938176
Since Beta is insignificant B == 0, hence .298 is the probability for housewife paying renewal
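###Markdown
The same logistic transform is available as `scipy.special.expit`; a one-line sketch (assuming scipy is installed) that reproduces the 0.298 figure:
###Code
from scipy.special import expit  # expit(z) = exp(z) / (1 + exp(z))
print(expit(-0.858))             # ~0.2978, matching the manual calculation above
###Output
_____no_output_____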
###Markdown
Q 4.7
The constant / intercept captures people with the occupations **Professional, Business and Agriculture**, and these groups have a lower probability of renewal payment. In Model 3 the coefficient of the intercept is negative, hence this conclusion.
Q 4.8
The probability can be calculated using the logistic formula:
\begin{equation*} P(Y=1) = \frac{e^z}{1 + e^z} \end{equation*}
\begin{equation*} \text{where } z = constant + \beta_1 \cdot PolicyTerm \end{equation*}
The regression equation reduces to this simple form because SSC education, Agriculturist profession and Marital Status = Single are absorbed into the constant term, and the remaining variables are zero.
###Code
print("Probability : {}".format(np.exp(3.105 + 60 * -0.026)/ (1 + np.exp(3.105 + 60 * -0.026))))
###Output
Probability : 0.824190402911071
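###Markdown
To see how the predicted renewal probability declines as the policy term grows under this equation (a small illustrative sketch reusing the same coefficients, 3.105 and -0.026; the term values are arbitrary):
###Code
for term in [12, 24, 36, 60, 120]:
    z = 3.105 - 0.026 * term
    print("Policy term {:>3} -> renewal probability {:.3f}".format(term, np.exp(z) / (1 + np.exp(z))))
###Output
_____no_output_____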
###Markdown
Q 4.9
The coefficients describe the relationship between the independent variables and the dependent variable on the logit scale: each estimate is the increase in the predicted log odds for a 1 unit increase in that predictor, holding all other predictors constant.
**Findings**:
- Married people have a higher likelihood of renewal (the log odds increase)
- As the payment term increases, the log odds of renewal decrease slightly
- Professionals and business people have a much higher chance of not renewing (lower log odds of renewal)
- Being a graduate increases the log odds of paying the renewal
- Annual / half-yearly / quarterly renewal schemes see reduced renewal payment (lower log odds)
- Model change - Premium: the variable's scale should be changed (for example to thousands of INR) to better understand Premium's contribution to the propensity to renew
**Recommendations**:
- For new customers, target married people and graduates
- For existing customers, send more reminders (via call centers / messages etc.) to business people and professionals
- For people paying premiums on yearly / half-yearly / quarterly terms, send reminders before the renewal dates
- For people with long payment terms, keep sending payment reminders as the tenure of their engagement advances
Q 4.10
The deciles are formed by arranging the records in descending order of predicted probability: Decile = 0.1 contains probabilities from 0.90001 to 1, Decile = 0.2 contains 0.80001 to 0.9 (both ends inclusive), and so on down to Decile = 1.
Gain is calculated as:
\begin{equation*} gain = \frac {\text{cumulative number of positive obs up to decile i}} {\text{total number of positive observations}} \end{equation*}
Lift is calculated as:
\begin{equation*} lift = \frac {\text{cumulative number of positive obs up to decile i}} {\text{cumulative number of positive observations up to decile i from a random model}} \end{equation*}
###Code
data = pd.DataFrame({'Decile': [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]})
data['posunits'] = [31, 0, 0, 0, 3, 5, 5, 4, 2, 1]
data['negunits'] = [0, 0, 0, 0, 0, 5, 11, 17, 12, 2]
data['posCountunits'] = data['posunits'] * 20
data['negCountunits'] = data['negunits'] * 20
avgPerDec = np.sum(data['posCountunits']) / 10
data['avgCountunits'] = avgPerDec
data['cumPosCountunits'] = data['posCountunits'].cumsum()
data['cumAvgCountunits'] = data['avgCountunits'].cumsum()
data['lift'] = data['cumPosCountunits'] / data['cumAvgCountunits']
data['gain'] = data['cumPosCountunits'] / data['posCountunits'].sum()
data['avgLift'] = 1
#print(df)
#### Plots
plt.figure(figsize=(15, 5))
plt.subplot(1,2,1)
plt.plot(data.avgLift, 'r-', label='Average Model Performance')
plt.plot(data.lift, 'g-', label='Predict Model Performance')
plt.title('Cumulative Lift Chart')
plt.xlabel('Deciles')
plt.ylabel('Normalised Model')
plt.legend()
plt.xlim(0, 10)
plt.subplot(1,2,2)
plt.plot(data.Decile, 'r-', label='Average Model Performance')
plt.plot(data.gain, 'g-', label='Predict Model Performance')
plt.title('Cumulative Gain Chart')
plt.xlabel('Deciles')
plt.ylabel('Gain')
plt.legend()
plt.xlim(0, 10)
data
###Output
_____no_output_____
###Markdown
**Observations**
- From the gain chart, the model captures about 61% of the positives in the first decile alone, 67% by the fifth decile and 76% by the sixth
- From the lift chart, the model captures about 6 times more positives than a random model in the 1st decile, 3 times by the 2nd decile, 2 times by the 3rd, 1.5 times by the 4th and roughly 1.3 times by the 5th
Q 5
###Code
import statsmodels.api as sm
import statsmodels.formula.api as smf
from IPython.display import display
pd.options.display.max_columns = None
%load_ext rpy2.ipython
oakland = pd.read_excel("./Oakland A Data 1.xlsx", sheet_name='Attendance Data')
#oakland.info()
print("There are no Missing Values in Data")
oakland.describe()
import seaborn as sns
fig = plt.figure(figsize=(15,5))
ax = plt.subplot("121")
ax.set_title("Distribution plot for TIX")
sns.distplot(oakland.TIX)
ax = plt.subplot("122")
ax.set_title("Distribution plot for LOG(TIX)")
sns.distplot(np.log(oakland.TIX))
plt.show()
print("TIX is right skewed distribution. The log Transformed TIX is more of an approximate normal distribution.")
###Output
_____no_output_____
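###Markdown
To back the visual impression with a number (a small sketch using pandas' built-in skewness estimate):
###Code
print("Skewness of TIX     :", oakland.TIX.skew())
print("Skewness of log(TIX):", np.log(oakland.TIX).skew())
###Output
_____no_output_____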
###Markdown
- Mark Nobel played in 21.33% of the Oakland A games during the period covered by the data
- We will perform a two-sample t-test on the mean TIX for games when Nobel played vs. games when he did not, to check whether there is a significant difference between the two means
###Code
sns.boxplot(x='NOBEL', y='TIX', data=oakland)
plt.show()
x1, S1, n1 = oakland.loc[oakland.NOBEL==1, "TIX"].mean(), oakland.loc[oakland.NOBEL==1, "TIX"].std(), oakland.loc[oakland.NOBEL==1, "TIX"].shape[0]
x2, S2, n2 = oakland.loc[oakland.NOBEL==0, "TIX"].mean(), oakland.loc[oakland.NOBEL==0, "TIX"].std(), oakland.loc[oakland.NOBEL==0, "TIX"].shape[0]
#x1, S1, n1 = np.mean(np.log(oakland.loc[oakland.NOBEL==1, "TIX"])), np.std(np.log(oakland.loc[oakland.NOBEL==1, "TIX"])), oakland.loc[oakland.NOBEL==1, "TIX"].shape[0]
#x2, S2, n2 = np.mean(np.log(oakland.loc[oakland.NOBEL==0, "TIX"])), np.std(np.log(oakland.loc[oakland.NOBEL==0, "TIX"])), oakland.loc[oakland.NOBEL==0, "TIX"].shape[0]
alpha = 0.05
adjustedAlpha = alpha
print("Alpha: {}".format(adjustedAlpha))
print("Mean TIX (x1) = {}, STD TIX = {} and number of games = {} with Nobel".format(x1, S1, n1))
print("Mean TIX (x1) = {}, STD TIX = {} and number of games = {} without Nobel".format(x2, S2, n2))
ho = "x1 - x2 <= 0"
ha = "x1 - x2 >0"
def pairwise_t_test(S1, S2, n1, n2, x1, x2, adjustedAlpha):
print("NUll Hypothesis: {}".format(ho))
print("Alternate Hypothesis: {}".format(ha))
print("This is 2 Sample T test, with unknown population SD and the SD of the two are unequal")
Su = ((S1 ** 2) / n1 + (S2 ** 2) / n2) ** 0.5
print("SE {}".format(Su))
df = np.math.floor(Su ** 4 / ((((S1 ** 2) / n1) ** 2) / (n1 -1) + (((S2 ** 2) / n2) ** 2) / (n2 -1)))
print("DF {}".format(df))
tstat = ((x1 - x2) - 0) /(Su)
print("T-stat {}".format(tstat))
print("This is a two sided T-Test")
#print("alpha/ Significance: {}".format(adjustedAlpha / 2))
print("Significant t-value at alpha - {} is : {}".format(adjustedAlpha , -1*stats.t.ppf(adjustedAlpha,
df = df)))
print("p-value:{} is greater than alpha({})".format(1 - stats.t.cdf(tstat, df = df), adjustedAlpha))
print("Hence we can retain the NULL Hypothesis (ho)")
pairwise_t_test(S1, S2, n1, n2, x1, x2, adjustedAlpha)
###Output
Alpha: 0.05
Mean TIX (x1) = 12663.5625, STD TIX = 11211.620411987733 and number of games = 16 with Nobel
Mean TIX (x2) = 10859.35593220339, STD TIX = 9357.940067245701 and number of games = 59 without Nobel
Null Hypothesis: x1 - x2 <= 0
Alternate Hypothesis: x1 - x2 >0
This is 2 Sample T test, with unknown population SD and the SD of the two are unequal
SE 3056.228389809929
DF 21
T-stat 0.5903376114861676
This is a one-sided (right-tailed) t-test
Significant t-value at alpha - 0.05 is : 1.7207429028118777
p-value:0.2806319971052741 is greater than alpha(0.05)
Hence we can retain the NULL Hypothesis (ho)
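###Markdown
As a sanity check on the hand-rolled test (a sketch assuming `scipy.stats` is available), Welch's unequal-variance t-test should reproduce the same t-statistic; because the observed difference is in the direction of the alternative, the one-sided p-value is half of the two-sided value returned here.
###Code
from scipy import stats
t_stat, p_two_sided = stats.ttest_ind(oakland.loc[oakland.NOBEL == 1, "TIX"],
                                      oakland.loc[oakland.NOBEL == 0, "TIX"],
                                      equal_var=False)  # Welch's t-test
print("t-statistic      :", t_stat)
print("one-sided p-value:", p_two_sided / 2)
###Output
_____no_output_____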
###Markdown
- In general, there is no statistical evidence that the single factor "presence of Nobel" has any effect on increasing ticket sales
- We will check whether this factor becomes important in the presence of other factors before drawing any final conclusions
###Code
corr = oakland[["TIX","OPP","POS","GB","DOW","TEMP","PREC","TOG","TV","PROMO","NOBEL","YANKS","WKEND","OD","DH"]].corr(method='pearson')
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(12, 12))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(255, 150, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
###Output
_____no_output_____
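###Markdown
To make the observations that follow easier to verify (a small addition reusing the `corr` matrix computed above), the correlations with TIX can be printed in rank order:
###Code
# Absolute correlation of each candidate predictor with ticket sales, strongest first
print(corr["TIX"].drop("TIX").abs().sort_values(ascending=False))
###Output
_____no_output_____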
###Markdown
- From the correlation plot above we see that games against the YANKS, PROMO and whether the match is a DOUBLE HEADER have relatively high correlations with TIX sales
**We will now create a series of regression models to check the validity of the claim that Mark Nobel's presence increases TIX and revenue generation for Oakland A**
- From the plots of TIX we noticed that TIX is not normally distributed, so a regression model built directly on TIX may end up with error terms that are not normally distributed
- To address this we will build the models using log-transformed TIX, which the earlier plot shows is closer to a normal distribution
###Code
y = np.log(oakland.TIX.values)
cols2use = "NOBEL"
x = oakland[cols2use]
#lg_model_1 = sm.OLS(y, sm.add_constant(x)).fit()
#lg_model_1.summary()
#lg_model_1.params
#lg_model_1.summary2()
%%R -i x -i y -w 800 -h 400
library(caret)
x = data.frame(x)
x$x = as.factor(x$x)
y = data.frame(y)
y$y = as.numeric(y$y)
#print(str(y$y))
#print(str(x))
objControl <- trainControl(method = "none", returnResamp = 'final',
summaryFunction = defaultSummary,
#summaryFunction = twoClassSummary, defaultSummary
classProbs = FALSE,
savePredictions = TRUE)
set.seed(766)
reg_caret_model <- train(x,
y$y,
method = 'lm',
trControl = objControl,
metric = "Rsquared",
tuneGrid = NULL,
verbose = FALSE)
#print(plot(varImp(reg_caret_model, scale = TRUE)))
print(summary(reg_caret_model))
par(mfrow = c(2, 2))
print(plot(reg_caret_model$finalModel))
###Output
_____no_output_____
###Markdown
- As with the hypothesis test, the model above shows that on its own the variable for the presence of Nobel is not significant in predicting TIX
**We will now build a model with NOBEL, YANKS, DH and PROMO**
###Code
y = np.log(oakland.TIX.values)
cols2use = ["NOBEL", "YANKS", "DH", "PROMO" ]
x = oakland[cols2use]
%%R -i x -i y -w 800 -h 400
library(caret)
x = data.frame(x)
x$NOBEL = factor(x$NOBEL)
x$YANKS = factor(x$YANKS)
x$DH = factor(x$DH)
x$PROMO = factor(x$PROMO)
y = data.frame(y)
y$y = as.numeric(y$y)
#print(str(y$y))
#print(str(x))
objControl <- trainControl(method = "none", returnResamp = 'final',
summaryFunction = defaultSummary,
#summaryFunction = twoClassSummary, defaultSummary
classProbs = FALSE,
savePredictions = TRUE)
set.seed(766)
reg_caret_model <- train(x,
y$y,
method = 'lm',
trControl = objControl,
metric = "Rsquared",
tuneGrid = NULL,
verbose = FALSE)
#print(plot(varImp(reg_caret_model, scale = TRUE)))
print(summary(reg_caret_model))
par(mfrow = c(2, 2))
print(plot(reg_caret_model$finalModel))
###Output
_____no_output_____
###Markdown
- As with the hypothesis test, the model above shows that the variable for the presence of Nobel is not significant in predicting TIX
**We will now build a stepwise model with all variables and select the best one. If the variable NOBEL is significant, it will be retained by the stepwise selection algorithm**
###Code
y = np.log(oakland.TIX.values)
cols2use = ["OPP","POS","GB","DOW","TEMP","PREC","TOG","TV","PROMO","NOBEL","YANKS","WKEND","OD","DH"]
x = oakland[cols2use]
%%R -i x -i y -w 800 -h 400
library(caret)
x = data.frame(x)
x$NOBEL = factor(x$NOBEL)
x$YANKS = factor(x$YANKS)
x$DH = factor(x$DH)
x$PROMO = factor(x$PROMO)
x$OPP = factor(x$OPP)
x$POS = factor(x$POS)
x$GB = factor(x$GB)
x$DOW = factor(x$DOW)
x$PREC = factor(x$PREC)
x$TOG = factor(x$TOG)
x$TV = factor(x$TV)
x$WKEND = factor(x$WKEND)
x$OD = factor(x$OD)
y = data.frame(y)
y$y = as.numeric(y$y)
#print(str(y$y))
#print(str(x))
objControl <- trainControl(method = "none", returnResamp = 'final',
summaryFunction = defaultSummary,
#summaryFunction = twoClassSummary, defaultSummary
classProbs = FALSE,
savePredictions = TRUE)
set.seed(766)
reg_caret_model <- train(x,
y$y,
method = 'lmStepAIC',
trControl = objControl,
metric = "Rsquared",
tuneGrid = NULL,
verbose = FALSE)
print(plot(varImp(reg_caret_model, scale = TRUE)))
print(summary(reg_caret_model))
par(mfrow = c(2, 2))
print(plot(reg_caret_model$finalModel))
###Output
_____no_output_____
###Markdown
**From the models created above, including the stepwise regression, and from the earlier analysis, we can see that the presence of Nobel is not significant in increasing ticket sales or the revenue collected from ticket sales. He does not contribute to increased revenue collection from ticket sales.**
Q6
Q6-1
- NPS is a KPI used by many organizations to understand and measure customer satisfaction
- Organizations also believe it is important to know what their customers tell their friends about them; NPS is treated by many organizations as a measure of whether a customer will recommend the company or its product/service to a friend or colleague
**Business Problem**
- Management at Manipal Hospitals believed that loyalty in healthcare depends on technical and emotional aspects
- Happy customers may bring new business; unhappy customers may lead to a lack of new business or erosion of existing business
- Through the NPS forms they wanted to collect customer feedback and sentiment
- By analysing the NPS data they also wanted to understand the reasons that led a customer to give a particular NPS score
- They believed that by analysing those reasons, resolving the underlying issues, and keeping customers informed about the corrective actions, they could improve customer satisfaction and hence the NPS
**How Analytics can help with the Problem**
- The historical paper-based feedback, once converted into digital data, together with the digital data captured after March 2014, can be analysed to derive insights
- By analysing past data, analytics can help unearth patterns that may be related to high or low customer satisfaction and NPS
- These patterns can be formulated into prescriptive actions that improve the process going forward, thereby improving overall customer satisfaction and NPS
- If analytics can link customer demographics / behaviour to NPS, the hospital can devise different strategies for different customer profiles, which can also lead to a better NPS and more satisfied customers
Q6-2
Sensitivity and specificity for a multinomial / 3-class problem can be calculated as follows. We will work through the method using the tables below and derive the formulas for the metrics.
total records = tp + fp + fn + tn
For a 2-class problem the definitions are:
- sensitivity = tp / (tp + fn)
- specificity = tn / (tn + fp)
where tp = true positives, fp = false positives, tn = true negatives, fn = false negatives.
The definitions of sensitivity and specificity do not change in the 3-class scenario; what changes is how we compute tp, tn, fp and fn. We demonstrate this below. Suppose we have 3 classes A, B and C.
Step 1: construct the confusion matrix for "A". The table below shows where FP1, FP2, FN1, FN2 etc. sit.
Here: fp = FP1 + FP2, fn = FN1 + FN2, tn = sum(X)
The formulas become:
- sensitivity = tp / (tp + fn1 + fn2)
- specificity = tn / (tn + fp1 + fp2)
###Code
array1 = pd.MultiIndex.from_arrays(np.array([['Predicted', '', ''],['A', 'B', 'C']]))
array2 = pd.MultiIndex.from_arrays(np.array([['Actual', '', ''],['A', 'B', 'C']]))
array1
data = data = pd.DataFrame(np.array([['TP', 'FN1', 'FN2'], ['FP1', 'X', 'X'], ['FP2', 'X', 'X']]),
columns=array1, index=array2)
data
###Output
_____no_output_____
###Markdown
Step 2: construct the confusion matrix for "B". The table below shows where FP1, FP2, FN1, FN2 etc. sit.
Here: fp = FP1 + FP2, fn = FN1 + FN2, tn = sum(X)
The formulas become:
- sensitivity = tp / (tp + fn1 + fn2)
- specificity = tn / (tn + fp1 + fp2)
###Code
array1 = pd.MultiIndex.from_arrays(np.array([['Predicted', '', ''],['A', 'B', 'C']]))
array2 = pd.MultiIndex.from_arrays(np.array([['Actual', '', ''],['A', 'B', 'C']]))
array1
data = data = pd.DataFrame(np.array([['X', 'FP1', 'X'], ['FN1', 'TP', 'FN2'], ['X', 'FP2', 'X']]),
columns=array1, index=array2)
data
###Output
_____no_output_____
###Markdown
Step 3: construct the confusion matrix for "C". The table below shows where FP1, FP2, FN1, FN2 etc. sit.
Here: fp = FP1 + FP2, fn = FN1 + FN2, tn = sum(X)
The formulas become:
- sensitivity = tp / (tp + fn1 + fn2)
- specificity = tn / (tn + fp1 + fp2)
###Code
array1 = pd.MultiIndex.from_arrays(np.array([['Predicted', '', ''],['A', 'B', 'C']]))
array2 = pd.MultiIndex.from_arrays(np.array([['Actual', '', ''],['A', 'B', 'C']]))
array1
data = data = pd.DataFrame(np.array([['X', 'X', 'FP1'], ['X', 'X', 'FP2'], ['FN1', 'FN2', 'TP']]),
columns=array1, index=array2)
data
###Output
_____no_output_____
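###Markdown
A small numeric sketch of the scheme above, using a made-up 3x3 confusion matrix (the counts are purely hypothetical): each class is treated one-vs-rest, with tp on the diagonal, fn in the rest of its row, fp in the rest of its column, and tn everywhere else.
###Code
# Hypothetical confusion matrix: rows = actual, columns = predicted, classes A, B, C
cm = np.array([[50, 3, 2],
               [4, 60, 6],
               [1, 5, 70]])
for i, label in enumerate(["A", "B", "C"]):
    tp = cm[i, i]
    fn = cm[i, :].sum() - tp
    fp = cm[:, i].sum() - tp
    tn = cm.sum() - tp - fn - fp
    print("{}: sensitivity = {:.3f}, specificity = {:.3f}".format(label, tp / (tp + fn), tn / (tn + fp)))
###Output
_____no_output_____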
###Markdown
Q6-3 Binary Classification Model
Train data source: "Training Data or Binary Class" tab
Test data source: "Test Data for Binary Class" tab
###Code
train_df = pd.read_excel("./IMB NPS 651.xlsx", sheet_name='Training Data or Binary Class')
test_df = pd.read_excel("./IMB NPS 651.xlsx", sheet_name='Test Data for Binary Class')
#train_df.info()
print("There are no Nulls in data, hence missing value treatment is not required.")
columns2Drop=["CE_NPS", "AdmissionDate", "DischargeDate", "HospitalNo2", "SN"]
train_df.drop(columns2Drop, inplace = True, axis = 'columns')
test_df.drop(columns2Drop, inplace = True, axis = 'columns')
pd.options.display.max_columns = None
#train_df.describe()
train_df['NPS_bin'] = 0
train_df.loc[train_df.NPS_Status != "Promotor", 'NPS_bin'] = 1
#train_df.describe()
test_df['NPS_bin'] = 0
test_df.loc[test_df.NPS_Status != "Promotor", 'NPS_bin'] = 1
train_df.drop(['NPS_Status'], axis = 'columns', inplace = True)
test_df.drop(['NPS_Status'], axis = 'columns', inplace = True)
catCols = train_df.select_dtypes(exclude=["number","bool_"]).columns
#
#for c in catCols:
# print(train_df[["NPS_bin"] + [c]].groupby([c]).agg([np.mean, np.std, len]))
#catCols = train_df.select_dtypes(exclude=["number","bool_"]).columns
#for c in catCols:
# print(test_df[["NPS_bin"] + [c]].groupby([c]).agg([np.mean, np.std, len]))
###Output
_____no_output_____
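###Markdown
Before modelling, a quick look at the class balance of the newly created binary target (a small sketch; NPS_bin = 1 groups Passive and Detractor together):
###Code
print(train_df["NPS_bin"].value_counts(normalize=True))
###Output
_____no_output_____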
###Markdown
- There are approximately 5000 records
- To reduce initial complexity and improve the model's ability to generalise, we will not encode categories with fewer than 100 rows as separate dummy variables, but merge all such categories into one bucket (constant / others / intercept)
- Note that 100 is not a magic number and is not derived in any statistical or mathematical way; more complex testing could be performed to optimise it, but we keep things simple for now
- The count is based on the training set, not the test set
- For the Dep column, "GEN" is the base category
- Estimated cost is on a very different scale, so we take a log transform of estimated cost
- Promoter is encoded as 0; Passive and Detractor are encoded as 1
###Code
train_df["marital_status"]= 0
train_df.loc[train_df.MaritalStatus == "Married", 'marital_status'] = 1
test_df["marital_status"]= 0
test_df.loc[test_df.MaritalStatus == "Married", 'marital_status'] = 1
train_df.drop('MaritalStatus', axis = 'columns', inplace=True)
test_df.drop('MaritalStatus', axis = 'columns', inplace=True)
train_df["gender"]= 0
train_df.loc[train_df.Sex == "M", 'gender'] = 1
test_df["gender"]= 0
test_df.loc[test_df.Sex == "M", 'gender'] = 1
train_df.drop('Sex', axis = 'columns', inplace=True)
test_df.drop('Sex', axis = 'columns', inplace=True)
trainrows = train_df.shape[0]
train_test = pd.concat([train_df, test_df], axis='rows')
cols2use = ['BedCategory', 'Department', 'InsPayorcategory', 'State', 'Country', 'STATEZONE']
for c in cols2use:
xx = pd.get_dummies(train_test[c])
interim = train_df[["NPS_bin"] + [c]].groupby([c], as_index = False).agg([len]).reset_index()
interim.columns = [''.join(x) for x in interim.columns]
interim.columns = ['x', 'y']
cols = interim.loc[interim.y >= 100, 'x']
xx = xx[cols]
train_test.drop(c, axis='columns', inplace = True)
train_test = pd.concat([train_test, xx], axis = 'columns')
train_test.drop('GEN', axis = 'columns', inplace = True)
train_test['Estimatedcost'] = np.log1p(train_test['Estimatedcost'] )
train_df = train_test.iloc[:trainrows, :]
test_df = train_test.iloc[trainrows:, :]
import gc
del(xx, interim, cols, cols2use, columns2Drop, train_test)
gc.collect()
%%R -i train_df
library(caret)
for (f in colnames(train_df))
{
if (class(train_df[[f]])=="character")
{
train_df[[f]] <- as.integer(train_df[[f]])
}
}
y = as.factor(train_df$NPS_bin)
train_df$NPS_bin = NULL
levels(y) <- make.names(levels(factor(y)))
print(levels(y))
objControl <- trainControl(method = "none", returnResamp = 'final',
summaryFunction = twoClassSummary,
#summaryFunction = twoClassSummary, defaultSummary
classProbs = TRUE,
savePredictions = TRUE)
lgCaretModel <- train(train_df,
y,
method = 'glmStepAIC',
trControl = objControl,
metric = "ROC",
verbose = TRUE)
plot(varImp(lgCaretModel, scale = TRUE))
print(summary(lgCaretModel))
par(mfrow = c(2, 2))
print(plot(lgCaretModel$finalModel))
caretPredictedClass = predict(object = lgCaretModel, train_df, type = 'raw')
confusionMatrix(caretPredictedClass,y)
###Output
_____no_output_____
###Markdown
**We run a stepwise model and select the important variables at a significance level of 0.1**
**We then rebuild the model with just the significant factors**
- Details of the models are below
###Code
cols4logit = ['CE_CSAT', 'CE_VALUEFORMONEY', 'EM_NURSING', 'AD_TARRIFFPACKAGESEXPLAINATION',
'AD_STAFFATTITUDE', 'INR_ROOMCLEANLINESS', 'INR_ROOMAMBIENCE', 'FNB_FOODQUALITY', 'FNB_FOODDELIVERYTIME',
'FNB_STAFFATTITUDE', 'AE_PATIENTSTATUSINFO', 'AE_ATTENDEEFOOD', 'DOC_TREATMENTEXPLAINATION',
'DOC_VISITS', 'NS_NURSESATTITUDE', 'OVS_OVERALLSTAFFPROMPTNESS', 'OVS_SECURITYATTITUDE',
'DP_DISCHARGEQUERIES', 'PEDIATRIC','GENERAL', 'ULTRA SPL', 'RENAL', 'CORPORATE',
'Karnataka', 'EXEMPTION']
#,'EXEMPTION','EM_IMMEDIATEATTENTION', 'LengthofStay', 'ORTHO', "INDIA", "EAST", 'Estimatedcost', ]
import statsmodels.api as sm
lg_model_1 = sm.GLM(train_df['NPS_bin'], sm.add_constant(train_df[cols4logit]),family=sm.families.Binomial()).fit()
lg_model_1.summary()
train_df_predict_1 = lg_model_1.predict(sm.add_constant(train_df[cols4logit]))
test_df_predict_1 = lg_model_1.predict(sm.add_constant(test_df[cols4logit]))
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
#confusion_matrix(test_df.NPS_bin, test_df_predict_1.values >= 0.5)
def draw_cm( actual, predicted ):
plt.figure(figsize=(9,9))
cm = metrics.confusion_matrix( actual, predicted )
sns.heatmap(cm, annot=True, fmt='.0f', cmap = 'Blues_r')
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.title('Classification Matrix Plot', size = 15);
plt.show()
draw_cm(test_df.NPS_bin, test_df_predict_1 >=0.5)
def draw_roc( actual, probs ):
fpr, tpr, thresholds = metrics.roc_curve( actual, probs, drop_intermediate = False )
auc_score = metrics.roc_auc_score( actual, probs )
plt.figure(figsize=(10, 10))
plt.plot( fpr, tpr, label='ROC curve (area = %0.2f)' % auc_score )
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate or [1 - True Negative Rate]')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
return fpr, tpr, thresholds
fpr, tpr, thresholds = draw_roc(test_df.NPS_bin, test_df_predict_1 >=0.5)
###Output
_____no_output_____
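###Markdown
For completeness (a small sketch reusing the objects defined above and the already-imported `classification_report`), a per-class summary on the test set at the 0.5 cut-off:
###Code
print(classification_report(test_df.NPS_bin, (test_df_predict_1 >= 0.5).astype(int)))
###Output
_____no_output_____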
###Markdown
- The regression has been set up to identify detractors and understand the reasons that may lead to a poor score
- This is not a model for predicting, on day 1 when a customer comes in, whether they will turn out to be a detractor. Such a model would be based on customer demographics and other customer attributes vs. the NPS score. This model includes the scores customers gave to individual departments, which will not be available for a new customer, so using it for that kind of analysis would not be prudent
**Observations**
- Areas to improve (these come out as key features leading to more Detractor / Passive responses):
  - Admission staff attitude
  - Cleanliness and hygiene of the room and bathroom
  - Karnataka residents are more dissatisfied
  - Helpfulness (or lack of it) of the security staff
  - Nursing attitude
  - Food and beverage staff attitude
- Some areas that are working well:
  - Prompt response to concerns or complaints
  - Regular process updates and visits by doctors
  - Emergency nursing
  - Explanation of the tariff and packages available
  - Guidance and information on patient health status
**Recommendations**
- Focus on behavioural training for staff and nurses
- Improve room and bathroom hygiene
- Given that a large number of patients are from Karnataka and that they have a higher chance of giving poor NPS scores, it is advisable to understand the needs of patients from this region and, if possible, cater to them; a follow-up study could be conducted to further improve their scores
Q6-4 Ordinal Logistic Classification Model
Train data source: "Training Data for Multi-Class M" tab
Test data source: "Test Data for Multi-Class Model" tab
###Code
train_df = pd.read_excel("./IMB NPS 651.xlsx", sheet_name='Training Data for Multi-Class M')
test_df = pd.read_excel("./IMB NPS 651.xlsx", sheet_name='Test Data for Multi-Class Model')
#train_df.info()
print("There are no Nulls in data, hence missing value treatment is not required.")
columns2Drop=["CE_NPS", "AdmissionDate", "DischargeDate", "HospitalNo2", "SN"]
train_df.drop(columns2Drop, inplace = True, axis = 'columns')
test_df.drop(columns2Drop, inplace = True, axis = 'columns')
train_df["marital_status"]= 0
train_df.loc[train_df.MaritalStatus == "Married", 'marital_status'] = 1
test_df["marital_status"]= 0
test_df.loc[test_df.MaritalStatus == "Married", 'marital_status'] = 1
train_df.drop('MaritalStatus', axis = 'columns', inplace=True)
test_df.drop('MaritalStatus', axis = 'columns', inplace=True)
train_df["gender"]= 0
train_df.loc[train_df.Sex == "M", 'gender'] = 1
test_df["gender"]= 0
test_df.loc[test_df.Sex == "M", 'gender'] = 1
train_df.drop('Sex', axis = 'columns', inplace=True)
test_df.drop('Sex', axis = 'columns', inplace=True)
trainrows = train_df.shape[0]
train_test = pd.concat([train_df, test_df], axis='rows')
cols2use = ['BedCategory', 'Department', 'InsPayorcategory', 'State', 'Country', 'STATEZONE']
train_test.loc[train_test.BedCategory == "SPECIAL", "BedCategory"] = "BedCategory_SPECIAL"
train_df.loc[train_df.BedCategory == "SPECIAL", "BedCategory"] = "BedCategory_SPECIAL"
test_df.loc[test_df.BedCategory == "SPECIAL", "BedCategory"] = "BedCategory_SPECIAL"
for c in cols2use:
xx = pd.get_dummies(train_test[c])
interim = train_df[["NPS_Status"] + [c]].groupby([c], as_index = False).agg([len]).reset_index()
interim.columns = [''.join(x) for x in interim.columns]
interim.columns = ['x', 'y']
cols = interim.loc[interim.y >= 150, 'x']
xx = xx[cols]
train_test.drop(c, axis='columns', inplace = True)
train_test = pd.concat([train_test, xx], axis = 'columns')
train_test.drop('GEN', axis = 'columns', inplace = True)
train_test.loc[train_test.NPS_Status == "Passive", "NPS_Status"] = "BasePassive"
train_test['Estimatedcost'] = np.log1p(train_test['Estimatedcost'] )
train_df = train_test.iloc[:trainrows, :]
test_df = train_test.iloc[trainrows:, :]
import gc
del(xx, interim, cols, cols2use, columns2Drop, train_test)
gc.collect()
cols4logit = list(set(train_df.columns)-set(['NPS_Status']))
import statsmodels.api as sm
import statsmodels.formula.api as smf
stats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df)
lg_model_1 = sm.MNLogit(train_df['NPS_Status'], sm.add_constant(train_df[cols4logit])).fit()
#lg_model_1.summary()
# Get significant variable
def get_significant_vars (modelobject):
var_p_vals_df = pd.DataFrame(modelobject.pvalues)
var_p_vals_df['vars'] = var_p_vals_df.index
var_p_vals_df.columns = ['pvals0', 'pvals1','vars']
return list(var_p_vals_df[(var_p_vals_df.pvals0 <= 0.05)|(var_p_vals_df.pvals1 <= 0.05) ]['vars'])
significant_vars_1 = get_significant_vars(lg_model_1)
#significant_vars_1
# build proper model
cols4logit = significant_vars_1[1:]
import statsmodels.api as sm
import statsmodels.formula.api as smf
stats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df)
lg_model_1 = sm.MNLogit(train_df['NPS_Status'], sm.add_constant(train_df[cols4logit])).fit()
lg_model_1.summary()
# Predictions and Confusion Matrix
train_df_predict_1 = lg_model_1.predict(sm.add_constant(train_df[cols4logit]))
test_df_predict_1 = lg_model_1.predict(sm.add_constant(test_df[cols4logit]))
test_df_predict_1
values = np.argmax(test_df_predict_1.values, axis=1)
finPred = pd.DataFrame({"NPS_Status": test_df.NPS_Status})
finPred['predVal'] = values
finPred['pred'] = 'X'
finPred.loc[finPred.predVal==0, 'pred'] = 'BasePassive'
finPred.loc[finPred.predVal==1, 'pred'] = 'Detractor'
finPred.loc[finPred.predVal==2, 'pred'] = 'Promotor'
pd.crosstab(finPred.NPS_Status, finPred.pred)
#print(test_df_predict_1.head())
#np.sum(test_df.NPS_Status=="Promotor")
###Output
_____no_output_____ |
notebooks/tutorials/matrix_creation/numerical_matrix_building_tools.ipynb | ###Markdown
Building a matrix for numerical methods using a Landlab grid(Greg Tucker, University of Colorado Boulder, July 2020)*This notebook explains how to use the matrix-building functions to construct a matrix for a finite-volume or finite-difference solution on a Landlab grid.* IntroductionNumerical solutions to differential equations often involve matrices. With grid-based numerical models, like those that Landlab is designed to support, the problem is *discretized* in space: we solve for one or more state variables of interest at a series of discrete spatial locations, such as a grid node or the cell that surrounds it. That process of discretization converts a partial differential equation into a set of ordinary differential equations, with one equation per point. Consider, for example, the one-dimensional diffusion equation:$$\frac{\partial \eta}{\partial t} = D\frac{\partial^2 \eta}{\partial x^2}$$where $t$ is time, $x$ is distance, $D$ is a transport coefficient, and $\eta$ could be concentration of a dissolved chemical (classic chemical diffusion), the temperature in a solid (heat diffusion), the velocity of flow in a viscous liquid (viscous momentum diffusion), or the height of the land on a hillslope (soil diffusion). If the domain is discretized such that we seek the value of $\eta$ at a series of discrete points, then the above equation for a given point $i$ becomes:$$\frac{d \eta_i}{d t} = D\frac{d^2 \eta}{d x^2}\bigg\rvert_i$$where the subscript at the right means "evaluated at $i$". Once the right side has been cast in terms of values of $\eta$ at particular points, you end up with a linear system of equations, and matrix methods provide a natural way to solve them. One example among many is an implicit finite-difference solution to the one-dimensional form of the diffusion equation, which involves constructing a matrix and a "right-hand side" vector, inverting the matrix, and multiplying it by the vector to obtain a solution for the state variable at each grid node (for more on that particular example, see *Mathematical Modeling of Earth's Dynamical Systems* by Slingerland and Kump, or *Numerical Recipes* by Press et al.).When using matrix methods to solve a set of equation at discrete points, whether in 2D or 1D (or even 3D), you typically have an $MxM$ matrix, where $M$ is the number of solution points. Each row in the matrix represents the equation for one of the points. If the equation for a given point includes terms that represent, say, two of its immediately neighboring points, then the columns representing those two points contain non-zero entries. More generally, finite-volume and finite-difference matrices tend be sparse, with only a few non-zero entries in each row: the column that represents the point itself, and the columns representing its immediate neighbors.Building the matrix therefore requires knowledge of which points are connected to which other points in the grid. In 1D, this is easy. In 2D, it's a bit more complicated. Fortunately, the structure of a Landlab grid lends itself to this task. In particular, we know the connectivity for the *nodes* in the grid. It also turns out that when nodes are the solution points (as is typical), the number of equations---and thus $M$---corresponds exactly to the number of *core nodes* in the grid.In the following, we first work through the mathematics in a simple example: a finite-volume matrix solution to a steady diffusion equation with a source term, also known as a Poisson equation. 
We then show some worked examples of the Landlab matrix tools in action. Example: steady diffusion with a source termConsider the diffusion model for hillslope evolution in two dimensions. The equation describes the time evolution of land surface height, $z$, given a transport coefficient $D$ $[L^2/T]$, and relative uplift rate $U$ $[L/T]$ as:$$\frac{\partial z}{\partial t} = U - \nabla \cdot (-D \nabla z)$$Here $\nabla z$ is the gradient of $z$, which here is a two-element vector (components in the $x$ and $y$ directions, respectively), and $\nabla\cdot$ is the divergence operator. We'll use a matrix method to solve for $z(x)$ when the time derivative is zero. So the equation we want to solve is:$$U \nabla \cdot (-D \nabla z) = 0$$If $D$ is spatially uniform, we can write this as:$$\boxed{\nabla^2 z = -U/D}$$This is the equation we're going to discretize and solve. Here $\nabla^2$ is understood to be the divergence-of-the-gradient, and in 1D would just be a second derivative:$$\frac{d^2z}{dx^2} = -\frac{U}{D}$$The minus sign is important: it indicates upward convexity of the solution when $U$ and $D$ are positive (which they always are in this case). Finite-volume discretizationLet's take a step back in the derivation of the diffusion equation to note that it is composed of two parts together. One part is mass conservation:$$\frac{\partial z}{\partial t} = U - \nabla \cdot \mathbf{q}$$where $\mathbf{q}$ is soil volume flux per unit width $[L^2/T]$. The other part is the flux law:$$\mathbf{q} = -D\nabla z$$For this example, we'll set the time derivative to zero, meaning we are looking for a steady solution.Next, we integrate the conservation law over a 2D region $R$. In general, $R$ is a simply connected region. Ultimately for us, it will be a grid cell, which could be a square, a rectangle, a hexagon, or even an irregular polygon.$$\int\int_R \nabla\cdot \mathbf{q} dR = \int\int_R U dR$$Because $U$ is constant inside the region $R$,$$\int\int_R \nabla\cdot \mathbf{q} dR = U A_r$$Now we apply Green's theorem, which basically says that an area integral over the divergence of a vector field is equivalent to a line integral of the surface-normal component of that vector around the perimeter of the region. Intuitively, if we consider $\mathbf{q}$ to be a flux in this case, what we're saying is that we can obtain the net total flux over the region (grid cell!) by integrating the flux all around the perimeter. Think of it as keeping track of all the people who enter or leave the perimeter of a playing field.$$\oint_S \mathbf{q} \cdot\mathbf{n} dS = U A_r$$where $\mathbf{n}$ is an (outward-facing) unit vector perpendicular to the perimeter $S$ that encloses region $R$. For us, again the perimeter is just the perimeter of the grid cell: the four sides of a square or rectangle, or the six side of a hexagon, the $N$ sides of a Voronoi polygon, or whatever. Then the line integral becomes a summation.We will define a quantity $q$ that represents the face-normal component of $\mathbf{q}$. The sign convention is as follows:- $q$ is positive if the vector orientation is toward the upper-right half space (including "right" and "up")- $q$ is negative if the vector orientation is toward the lower-left half space (including "left" and "down")We will also define a binary variable $\delta$, which is negative if the outward surface-normal points toward the lower-left half space, and positive if it points toward the upper-right half space.Here's where Landlab grids come into the picture. 
The two definitions represent the use of *links* in a Landlab grid: when $q$ is positive when it oriented in the link's direction, and negative when oriented in the opposite direction. In a simple raster grid, where the links are all horizontal or vertical, the interpretation is very simple: flow to the right (increasing $x$) is positive, and to the left is negative; flow upward (increasing $y$) is positive, and downward is negative.More generally, whatever the grid type, links by convention always "point" toward the upper-right half space; hence the general definition of $q$ above. The variable $\delta$ represents the link orientation relative to the cell: positive when the link points out of the cell, negative when it points into the cell. The variable is represented in a Landlab grid by the array `link_dirs_at_node`: one for each link, starting from the "east" (or "right") direction and going counter-clockwise.Suppose $R$ is a square grid cell of width $\Delta x$. Then:$$\oint_S \mathbf{f} \cdot\mathbf{n} dR = \sum_{k=1}^4 q_k \delta_k \Delta x$$where $q_k$ is the magnitude of the vector field at face $k$, and $\delta = -1$ if the link at face $k$ points inward, and $+1$ if the link points outward.For this Poisson problem (i.e., diffusion with zero time derivative), the flux between the two nodes at either end of a link is approximated as the difference in $z$ divided by the distance, which here is $\Delta x$. For each of the four directions:$q_e = -(D/\Delta x) (z_e - z_i)$$q_n = -(D/\Delta x) (z_n - z_i)$$q_e = -(D/\Delta x) (z_i - z_w)$$q_s = -(D/\Delta x) (z_i - z_s)$Here the subscript refers to the four cardinal directions. When you work out the summation above, you get:$$\sum_{k=1}^4 q_k \delta_k \Delta x = -D (z_e + z_n - + z_w + z_s - 4z_i)$$.Now plug this back into our governing equation, and divide both sides by $A_r = \Delta x^2$:$$-D (z_e + z_n - + z_w + z_s - 4z_i) = U$$or$$\boxed{z_e + z_n - + z_w + z_s - 4z_i = -U/D}$$So the above represents a system of equations: one equation per core node in a Landlab grid. For any given core node, $z_i$ is the elevation of the node itself, and the other four are the elevations of its four neighbors. By the way, for a regular raster grid, this finite-volume setup turns out to be the same as the finite-difference version. Here the directional subscripts will ultimately be replaced with indices of the particular neighboring nodes. Example of a finite-volume setupSuppose we have a raster model grid with 4 rows and 5 columns, so that there are 6 interior nodes. To make it interesting, let's assume that one of the interior nodes is actually a fixed-value boundary. We will also assume that the perimeter nodes are fixed-value boundaries. Fixed-value boundary simply means that we will keep the elevation constant at these nodes. In total, then, there are 5 core nodes at which we wish to solve for $z$. An illustration of the grid, with the lower-left node being node number 0, looks like:`o---o---o---o---o | | | | | o---.---.---o---o | | | | | o---.---.---.---o | | | | | o---o---o---o---o`In the illustration, `.` is a core node, and `o` is a fixed-value boundary node. 
The numbering of *nodes* looks like this:`15---16---17---18---19 | | | | | 10---11---12---13---14 | | | | | 5--- 6--- 7--- 8--- 9 | | | | | 0--- 1--- 2--- 3--- 4`Here's a version where we number the *core nodes* consecutively:`o---o---o---o---o | | | | | o---3---4---o---o | | | | | o---0---1---2---o | | | | | o---o---o---o---o`These numbers correspond to rows in a matrix that we will construct. For each row, the column representing the node itself gets a -4, corresponding to the boxed equation above. For each of its neighboring **core** nodes, the corresponding column gets a +1. For example, the first row in the matrix, representing core node 0 in the above sketch, will have a -4 in column 0. It will have a +1 in column 1, representing the neighbor to its east, and a +1 in column 3, representing the neighbor to its north. Here's what the matrix should look like:\begin{vmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{vmatrix}But what happens when one or more of the four neighbors is not another core node, but rather a fixed-value boundary? That's actually the case for *all* of the core nodes in the above example. To appreciate how this works, recall that we're going to put all the constant terms on the right-hand side of the equation. To write this out, we need a way to notate both core nodes and fixed-value nodes. Here, we'll use a subscript to index by *core node ID* (for the core nodes), and parentheses to index by *node ID* (for the boundary nodes). With that notation in mind, the equations for the example grid above are:\begin{eqnarray}z_1 + z_3 + z(5) + z(1) - 4z_0 = -U/D \\z_2 + z_4 + z_0 + z(2) - 4z_1 = -U/D \\z(9) + z(13) + z_1 + z(3) - 4z_2 = -U/D \\z_4 + z(16) + z(10) + z_0 - 4z_3 = -U/D \\z(13) + z(17) + z_3 + z_1 - 4z_4 = -U/D \\\end{eqnarray}With this notation, it's easy to spot the fixed-value boundary nodes, whose entries we'll move to the right-side:\begin{eqnarray} - 4z_0 + z_1 + z_3 = -U/D - (z(5) + z(1)) \\z_0 - 4z_1 + z_2 + z_4 = -U/D - z(2) \\z_1 - 4z_2 = -U/D - (z(9) + z(13) + z_1 + z(3)) \\z_0 - 4z_3 + z_4 = -U/D - (z(16) + z(10)) \\z_1 + z_3 - 4z_4 = -U/D - (z(13) + z(17)) \\\end{eqnarray}The above set of equations is represented by the following matrix equation:\begin{gather}\begin{bmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{bmatrix}\begin{bmatrix}z_0 \\z_1 \\z_2 \\z_3 \\z_4\end{bmatrix} =\begin{bmatrix}-U/D - (z(5) + z(1)) \\-U/D - z(2) \\-U/D - (z(9) + z(13) + z_1 + z(3)) \\-U/D - (z(16) + z(10)) \\-U/D - (z(13) + z(17))\end{bmatrix}\end{gather}or more succinctly,$$A\mathbf{z} = \mathbf{b}$$for which the solution is$$\mathbf{z} = A^{-1} \mathbf{b}$$In other words this is the equation that we need to solve by inverting the matrix $A$, which we can do using `numpy.linalg.inv()`. Here's an example:
###Code
import numpy as np
mat = np.array(
[
[-4, 1, 0, 1, 0],
[1, -4, 1, 0, 1],
[0, 1, -4, 0, 0],
[1, 0, 0, -4, 1],
[0, 1, 0, 1, -4],
]
)
print(np.linalg.inv(mat))
###Output
_____no_output_____
###Markdown
Let's assume for the sake of this example that $U=10^{-4}$ m/yr, $D=10^{-2}$ m$^2$/yr, and all the nodes around the perimeter have zero elevation. What does the solution look like in terms of numbers?
###Code
U = 0.0001
D = 0.01
rhs = -(U / D) + np.zeros((5, 1))
solution = np.dot(
np.linalg.inv(mat), rhs
) # dot product for matrix-vector multiplication
print(solution)
###Output
_____no_output_____
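###Markdown
A side note on the linear algebra (a small sketch): for larger systems it is usually better to call `np.linalg.solve` than to form the inverse explicitly; it is cheaper and more numerically stable, and it gives the same answer here.
###Code
solution_via_solve = np.linalg.solve(mat, rhs)
print(np.allclose(solution, solution_via_solve))  # True: same solution without an explicit inverse
###Output
_____no_output_____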
###Markdown
You can see from this example that once you have your matrix and right-hand side vector, numpy's linear algebra functions make it straightforward to solve the system. The tricky part is building the matrix and the right-side vector in the first place. This is where Landlab's matrix-building utility comes in. Landlab's matrix-building functionsTo facilitate matrix-based numerical solutions, Landlab's collection of utilities includes two helper functions:- `get_core_node_matrix(grid, value, rhs=None)` creates and returns a matrix like $A$ above for a Landlab grid, as well as a right-hand-side vector. The matrix is returned as an M x M numpy array, where M is the number of core nodes in the grid. The right-hand-side vector is an M x 1 array. Each row in the matrix represents one core node. The rules for building a row, and the corresponding row in the right-hand-side vector, are: - For every *active link* connected to the node, if the link connects to another core node, the corresponding column in the matrix is assigned the value $+1$. For example, in the tiny grid presented earlier, core node 0 is connected to two other core nodes, 1 and 3. Therefore, columns 1 and 3 in row 0 of the matrix are each set to $+1$. - The matrix column representing the node itself is assigned a value equal to $-1$ times the number of active links that connect to the node, which represents the number of neighboring nodes that are not closed-boundary nodes. In the example grid above, core node 0 is connected for four active links, and so row 0, column 0 is set to -4. - All other matrix entries are zero. - For every neighboring *fixed-value boundary* node adjacent to core node $i$, the value at the neighbor node is subtracted from column $i$ of the right-hand side vector. This is how a fixed-value boundary condition is handled. In the example grid above, core node 0 is bordered by two fixed-value boundary nodes (node IDs 1 and 5). The values of $z$ at these two fixed-value nodes are subtracted from row 0 of the right-hand-side boundary vector. - `get_core_node_matrix_var_coef(grid, value, coef_at_link=coef, rhs=None)` does basically the same thing, but allows for a spatially variable coefficient ($D$, or its equivalent in your particular problem). In the example above, we assumed that $D$ was constant, and were therefore able to move it to the right side of the equation. But there are plenty of cases where you might want to allow $D$ to vary in space. This function allows that by taking as an input a 1D array containing a value of $D$ for each grid link. The function ensures that $D$ is factored in appropropriately. (Exercise to the reader: use the example above, but with a spatially variable $D$, to work out what "appropriately" means here). Note that when $D$ varies in space, it is included on the left side of the equation (i.e., in the matrix $A$) and **not** in the right-side vector.Both functions return two items: an M x M array (the matrix) and an M x 1 array (for the right-hand-side vector). With both functions, however, it is your job as the user to properly set up your right-hand-side vector. You have two options. The first is to pass in an array as the `rhs` argument. It should be a 1D array of length equal to the number of core nodes. The function will then add the boundary condition information to whatever values you have already put there. The second option is to omit the `rhs` argument. 
In this case the function will create a "preliminary" version that contains **only** the values needed to handle fixed-value boundary conditions; you must then add the rest of your right-side information to this before solving. For example, in the sample problem above, you would need to add $-U/D$ to each element of your right-hand-side vector, while the function would take care of adding the various boundary $z$ values.Both functions take either a `RasterModelGrid` or a `HexModelGrid` as the first argument. The matrix-creation functions work for both grid types. Note however that if you have a hex grid, you must multiply your right-hand-side vector by 3/2 (exercise: modify the derivation above, accounting for the area and side length of a hexagon, to demonstrate why this is the case). In principle, the same finite-volume solution method should work for other grid types too, but with modifications to handle spatial variation in cell area, face width, and link length. (If irregular-grid functionality is something you need for your application, we encourage you to develop it and submit a pull request!)Both functions also take a `value` array containing the node-based values of interest (e.g., $z$ in the sample problem above). This should by a 1D numpy array of length equal to the total number of grid nodes. Examples using Landlab matrix functions Constant coefficientThe example below uses Landlab to solve the tiny sample problem described above.
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import get_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((4, 5), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros("topographic__elevation", at="node")
# Convert one of the interior nodes to boundary
grid.status_at_node[13] = grid.BC_NODE_IS_FIXED_VALUE
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Let's take a look at them
print("Matrix:")
print(mat)
print("Right-side vector:")
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print("Solution:")
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
_____no_output_____
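###Markdown
As a quick physical check (a sketch that assumes Landlab's standard gradient and divergence helpers, `calc_grad_at_link` and `calc_flux_div_at_node`), the flux divergence computed from the solved elevation field should equal the uplift rate U at every core node.
###Code
soil_flux = -D * grid.calc_grad_at_link(z)        # soil flux on links (D = 0.01 here)
flux_div = grid.calc_flux_div_at_node(soil_flux)  # net outflux per unit cell area
print(flux_div[grid.core_nodes])                  # should all be close to U = 1e-4
###Output
_____no_output_____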
###Markdown
Note that the solution for our tiny test grid is the same as before, as it should be.
Version with a variable coefficient
Next, we repeat the above, but for a case of a spatially variable $D$. We'll first do it with an array of $D$ values, one per link, where the $D$ values are the same as above, just to demonstrate that the solution is the same.
###Code
from landlab.utils import get_core_node_matrix
# Define an array of D values
D = 0.01 + np.zeros(
grid.number_of_links
) # we could also make this a grid field if desired
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z, coef_at_link=D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print("Matrix:")
print(mat)
print("Right-side vector:")
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print("Solution:")
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
_____no_output_____
###Markdown
Here, the matrix and RHS vector are different, but the solution is the same. We've simply factored $D$ into the left side instead of the right side.
Now let's try making $D$ actually vary in space. For the sake of illustration, we'll assign a high value to the links on the left, and a 100x lower value to the links on the right. What do you think this will do to the topography?
###Code
# Define an array of D values
D = np.zeros(grid.number_of_links) # we could also make this a grid field if desired
D[grid.x_of_node[grid.node_at_link_head] > 2.0] = 0.001
D[grid.x_of_node[grid.node_at_link_head] <= 2.0] = 0.1
print("D values:")
print(D)
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z, coef_at_link=D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print("Matrix:")
print(mat)
print("Right-side vector:")
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print("Solution:")
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
_____no_output_____
###Markdown
Here the lone core cell on the right is surrounded by links at which transport is inefficient; in other words, $D$ is small. Therefore, the cell needs steep slopes on all sides in order to transport out the incoming soil. The other cells are all bordered by at least one link with a high $D$ value, so they don't need much gradient to transport out the incoming material.
Comparison with 1D analytical solution
In the next example, we'll set up an effectively 1D domain, and compare it with the known analytical solution. We can produce a quasi-1D grid by giving it just 3 rows, two of which are boundary rows, and setting the status of those boundaries to *closed*. The expected analytical solution is a parabola:
$$z = \frac{UL^2}{D}\left(\frac{x}{L} - \frac{1}{2}\left[\frac{x}{L}\right]^2\right)$$
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import get_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((3, 101), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros("topographic__elevation", at="node")
# Set closed boundaries on north and south
grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Calculate the analytical solution
middle_row = np.arange(101, 202, dtype=int) # middle row of grid nodes
x = grid.x_of_node[middle_row] # x coordinates: 0, 1, ... 100
L = 50.0 # half-length of domain
za = (U / D) * (x * L - 0.5 * x * x) # analytical solution
# Plot
import matplotlib.pyplot as plt
plt.plot(x, z[middle_row], "b.")
plt.plot(x, za, "r")
plt.xlabel("Distance (m)")
plt.ylabel("Height (m)")
plt.legend(["numerical", "analytical"])
###Output
_____no_output_____
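###Markdown
As a quick quantitative follow-up (a small addition using the arrays already defined above), the maximum absolute difference between the numerical and analytical profiles:
###Code
print("Maximum absolute difference:", np.abs(z[middle_row] - za).max())
###Output
_____no_output_____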
###Markdown
Hexagonal grid
One advantage of the finite-volume method is that it isn't limited to rectilinear grids. The next example demonstrates this with a tiny hex grid. This wee little grid has just two core nodes, so our matrix will be 2x2. One change is that we need to multiply the RHS values by 3/2 to account for the hex geometry.
###Code
from landlab import HexModelGrid
# Instantiate the grid: here 3 rows, with 3 columns top and bottom and 4 in the middle
hg = HexModelGrid((3, 3))
# Add the elevation field
z = hg.add_zeros("topographic__elevation", at="node")
# Constants, as before
U = 0.0001
D = 0.01
dx = 1.0 # this is the spacing between nodes
# Create the matrix and RHS
mat, rhs = get_core_node_matrix(hg, z)
# Fill in the rest of the RHS vector, including a factor of 3/2 for the hex grid.
rhs[:] += -1.5 * U * dx * dx / D
# Solve
soln = np.dot(np.linalg.inv(mat.toarray()), rhs)
z[hg.core_nodes] = soln.flatten()
print(mat)
print(rhs)
print(z)
###Output
_____no_output_____
###Markdown
We can test this. The uplift rate times the cell area represents the volume rate in. Because this is a steady problem, it should equal the volume rate out. The volume rate out across any outer cell face is equal to the gradient across the face times $D$ times the width of the face. The face width in this case is $3^{-1/2}$. Here, the boundaries are all at zero and the distance between nodes is unity, so the gradient is equal to the elevation value. Hence, the flux out across any one face is:$$3^{-1/2} Dz$$and the total flux equals the flux of one face times the number of outer faces, of which there happen to be 10. Here's the calculation:
###Code
# Test: area times 2 cells times uplift rate should equal number of exposed sides times elevation
area = 0.5 * 3.0 ** 0.5
influx = 2 * area * U
outflux = 10 * D * (1.0 / 3.0 ** 0.5) * z[4]
print(influx)
print(outflux)
###Output
_____no_output_____
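###Markdown
As a final sanity check on the hex-grid example (a minimal sketch assuming the `influx` and `outflux` values just computed are still in scope), we can confirm numerically that the two rates agree to within floating-point precision.
###Code
# At steady state the volume rate in should equal the volume rate out
print("mass balance satisfied:", np.isclose(influx, outflux))
###Output
_____no_output_____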
###Markdown
Building a matrix for numerical methods using a Landlab grid(Greg Tucker, University of Colorado Boulder, July 2020)*This notebook explains how to use the matrix-building functions to construct a matrix for a finite-volume or finite-difference solution on a Landlab grid.* IntroductionNumerical solutions to differential equations often involve matrices. With grid-based numerical models, like those that Landlab is designed to support, the problem is *discretized* in space: we solve for one or more state variables of interest at a series of discrete spatial locations, such as a grid node or the cell that surrounds it. That process of discretization converts a partial differential equation into a set of ordinary differential equations, with one equation per point. Consider, for example, the one-dimensional diffusion equation:$$\frac{\partial \eta}{\partial t} = D\frac{\partial^2 \eta}{\partial x^2}$$where $t$ is time, $x$ is distance, $D$ is a transport coefficient, and $\eta$ could be concentration of a dissolved chemical (classic chemical diffusion), the temperature in a solid (heat diffusion), the velocity of flow in a viscous liquid (viscous momentum diffusion), or the height of the land on a hillslope (soil diffusion). If the domain is discretized such that we seek the value of $\eta$ at a series of discrete points, then the above equation for a given point $i$ becomes:$$\frac{d \eta_i}{d t} = D\frac{d^2 \eta}{d x^2}\bigg\rvert_i$$where the subscript at the right means "evaluated at $i$". Once the right side has been cast in terms of values of $\eta$ at particular points, you end up with a linear system of equations, and matrix methods provide a natural way to solve them. One example among many is an implicit finite-difference solution to the one-dimensional form of the diffusion equation, which involves constructing a matrix and a "right-hand side" vector, inverting the matrix, and multiplying it by the vector to obtain a solution for the state variable at each grid node (for more on that particular example, see *Mathematical Modeling of Earth's Dynamical Systems* by Slingerland and Kump, or *Numerical Recipes* by Press et al.).When using matrix methods to solve a set of equation at discrete points, whether in 2D or 1D (or even 3D), you typically have an $MxM$ matrix, where $M$ is the number of solution points. Each row in the matrix represents the equation for one of the points. If the equation for a given point includes terms that represent, say, two of its immediately neighboring points, then the columns representing those two points contain non-zero entries. More generally, finite-volume and finite-difference matrices tend be sparse, with only a few non-zero entries in each row: the column that represents the point itself, and the columns representing its immediate neighbors.Building the matrix therefore requires knowledge of which points are connected to which other points in the grid. In 1D, this is easy. In 2D, it's a bit more complicated. Fortunately, the structure of a Landlab grid lends itself to this task. In particular, we know the connectivity for the *nodes* in the grid. It also turns out that when nodes are the solution points (as is typical), the number of equations---and thus $M$---corresponds exactly to the number of *core nodes* in the grid.In the following, we first work through the mathematics in a simple example: a finite-volume matrix solution to a steady diffusion equation with a source term, also known as a Poisson equation. 
We then show some worked examples of the Landlab matrix tools in action. Example: steady diffusion with a source termConsider the diffusion model for hillslope evolution in two dimensions. The equation describes the time evolution of land surface height, $z$, given a transport coefficient $D$ $[L^2/T]$, and relative uplift rate $U$ $[L/T]$ as:$$\frac{\partial z}{\partial t} = U - \nabla \cdot (-D \nabla z)$$Here $\nabla z$ is the gradient of $z$, which here is a two-element vector (components in the $x$ and $y$ directions, respectively), and $\nabla\cdot$ is the divergence operator. We'll use a matrix method to solve for $z(x)$ when the time derivative is zero. So the equation we want to solve is:$$U - \nabla \cdot (-D \nabla z) = 0$$If $D$ is spatially uniform, we can write this as:$$\boxed{\nabla^2 z = -U/D}$$This is the equation we're going to discretize and solve. Here $\nabla^2$ is understood to be the divergence-of-the-gradient, and in 1D would just be a second derivative:$$\frac{d^2z}{dx^2} = -\frac{U}{D}$$The minus sign is important: it indicates upward convexity of the solution when $U$ and $D$ are positive (which they always are in this case). Finite-volume discretizationLet's take a step back in the derivation of the diffusion equation to note that it is composed of two parts. One part is mass conservation:$$\frac{\partial z}{\partial t} = U - \nabla \cdot \mathbf{q}$$where $\mathbf{q}$ is soil volume flux per unit width $[L^2/T]$. The other part is the flux law:$$\mathbf{q} = -D\nabla z$$For this example, we'll set the time derivative to zero, meaning we are looking for a steady solution.Next, we integrate the conservation law over a 2D region $R$. In general, $R$ is a simply connected region. Ultimately for us, it will be a grid cell, which could be a square, a rectangle, a hexagon, or even an irregular polygon.$$\int\int_R \nabla\cdot \mathbf{q} dR = \int\int_R U dR$$Because $U$ is constant inside the region $R$,$$\int\int_R \nabla\cdot \mathbf{q} dR = U A_r$$Now we apply Green's theorem, which basically says that an area integral over the divergence of a vector field is equivalent to a line integral of the surface-normal component of that vector around the perimeter of the region. Intuitively, if we consider $\mathbf{q}$ to be a flux in this case, what we're saying is that we can obtain the net total flux over the region (grid cell!) by integrating the flux all around the perimeter. Think of it as keeping track of all the people who enter or leave the perimeter of a playing field.$$\oint_S \mathbf{q} \cdot\mathbf{n} dS = U A_r$$where $\mathbf{n}$ is an (outward-facing) unit vector perpendicular to the perimeter $S$ that encloses region $R$. For us, again the perimeter is just the perimeter of the grid cell: the four sides of a square or rectangle, or the six sides of a hexagon, the $N$ sides of a Voronoi polygon, or whatever. Then the line integral becomes a summation.We will define a quantity $q$ that represents the face-normal component of $\mathbf{q}$. The sign convention is as follows:- $q$ is positive if the vector orientation is toward the upper-right half space (including "right" and "up")- $q$ is negative if the vector orientation is toward the lower-left half space (including "left" and "down")We will also define a binary variable $\delta$, which is negative if the outward surface-normal points toward the lower-left half space, and positive if it points toward the upper-right half space.Here's where Landlab grids come into the picture. 
These two definitions reflect the use of *links* in a Landlab grid: $q$ is positive when it is oriented in the link's direction, and negative when oriented in the opposite direction. In a simple raster grid, where the links are all horizontal or vertical, the interpretation is very simple: flow to the right (increasing $x$) is positive, and to the left is negative; flow upward (increasing $y$) is positive, and downward is negative.More generally, whatever the grid type, links by convention always "point" toward the upper-right half space; hence the general definition of $q$ above. The variable $\delta$ represents the link orientation relative to the cell: positive when the link points out of the cell, negative when it points into the cell. This variable is represented in a Landlab grid by the array `link_dirs_at_node`: one entry for each link attached to a node, starting from the "east" (or "right") direction and going counter-clockwise.Suppose $R$ is a square grid cell of width $\Delta x$. Then:$$\oint_S \mathbf{q} \cdot\mathbf{n} dS = \sum_{k=1}^4 q_k \delta_k \Delta x$$where $q_k$ is the magnitude of the vector field at face $k$, and $\delta_k = -1$ if the link at face $k$ points inward, and $+1$ if the link points outward.For this Poisson problem (i.e., diffusion with zero time derivative), the flux between the two nodes at either end of a link is approximated as the difference in $z$ divided by the distance, which here is $\Delta x$. For each of the four directions:$q_e = -(D/\Delta x) (z_e - z_i)$$q_n = -(D/\Delta x) (z_n - z_i)$$q_w = -(D/\Delta x) (z_i - z_w)$$q_s = -(D/\Delta x) (z_i - z_s)$Here the subscript refers to the four cardinal directions. When you work out the summation above, you get:$$\sum_{k=1}^4 q_k \delta_k \Delta x = -D (z_e + z_n + z_w + z_s - 4z_i).$$Now plug this back into our governing equation, $\oint_S \mathbf{q} \cdot\mathbf{n} dS = U A_r$, with $A_r = \Delta x^2$. Taking unit node spacing ($\Delta x = 1$, as in the examples below) gives:$$-D (z_e + z_n + z_w + z_s - 4z_i) = U$$or, dividing through by $-D$,$$\boxed{z_e + z_n + z_w + z_s - 4z_i = -U/D}$$So the above represents a system of equations: one equation per core node in a Landlab grid. For any given core node, $z_i$ is the elevation of the node itself, and the other four are the elevations of its four neighbors. By the way, for a regular raster grid, this finite-volume setup turns out to be the same as the finite-difference version. Here the directional subscripts will ultimately be replaced with indices of the particular neighboring nodes. Example of a finite-volume setupSuppose we have a raster model grid with 4 rows and 5 columns, so that there are 6 interior nodes. To make it interesting, let's assume that one of the interior nodes is actually a fixed-value boundary. We will also assume that the perimeter nodes are fixed-value boundaries. Fixed-value boundary simply means that we will keep the elevation constant at these nodes. In total, then, there are 5 core nodes at which we wish to solve for $z$. An illustration of the grid, with the lower-left node being node number 0, looks like:`o---o---o---o---o | | | | | o---.---.---o---o | | | | | o---.---.---.---o | | | | | o---o---o---o---o`In the illustration, `.` is a core node, and `o` is a fixed-value boundary node. 
The numbering of *nodes* looks like this:`15---16---17---18---19 | | | | | 10---11---12---13---14 | | | | | 5--- 6--- 7--- 8--- 9 | | | | | 0--- 1--- 2--- 3--- 4`Here's a version where we number the *core nodes* consecutively:`o---o---o---o---o | | | | | o---3---4---o---o | | | | | o---0---1---2---o | | | | | o---o---o---o---o`These numbers correspond to rows in a matrix that we will construct. For each row, the column representing the node itself gets a -4, corresponding to the boxed equation above. For each of its neighboring **core** nodes, the corresponding column gets a +1. For example, the first row in the matrix, representing core node 0 in the above sketch, will have a -4 in column 0. It will have a +1 in column 1, representing the neighbor to its east, and a +1 in column 3, representing the neighbor to its north. Here's what the matrix should look like:\begin{vmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{vmatrix}But what happens when one or more of the four neighbors is not another core node, but rather a fixed-value boundary? That's actually the case for *all* of the core nodes in the above example. To appreciate how this works, recall that we're going to put all the constant terms on the right-hand side of the equation. To write this out, we need a way to notate both core nodes and fixed-value nodes. Here, we'll use a subscript to index by *core node ID* (for the core nodes), and parentheses to index by *node ID* (for the boundary nodes). With that notation in mind, the equations for the example grid above are:\begin{eqnarray}z_1 + z_3 + z(5) + z(1) - 4z_0 = -U/D \\z_2 + z_4 + z_0 + z(2) - 4z_1 = -U/D \\z(9) + z(13) + z_1 + z(3) - 4z_2 = -U/D \\z_4 + z(16) + z(10) + z_0 - 4z_3 = -U/D \\z(13) + z(17) + z_3 + z_1 - 4z_4 = -U/D \\\end{eqnarray}With this notation, it's easy to spot the fixed-value boundary nodes, whose entries we'll move to the right side:\begin{eqnarray} - 4z_0 + z_1 + z_3 = -U/D - (z(5) + z(1)) \\z_0 - 4z_1 + z_2 + z_4 = -U/D - z(2) \\z_1 - 4z_2 = -U/D - (z(9) + z(13) + z(3)) \\z_0 - 4z_3 + z_4 = -U/D - (z(16) + z(10)) \\z_1 + z_3 - 4z_4 = -U/D - (z(13) + z(17)) \\\end{eqnarray}The above set of equations is represented by the following matrix equation:\begin{gather}\begin{bmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{bmatrix}\begin{bmatrix}z_0 \\z_1 \\z_2 \\z_3 \\z_4\end{bmatrix} =\begin{bmatrix}-U/D - (z(5) + z(1)) \\-U/D - z(2) \\-U/D - (z(9) + z(13) + z(3)) \\-U/D - (z(16) + z(10)) \\-U/D - (z(13) + z(17))\end{bmatrix}\end{gather}or more succinctly,$$A\mathbf{z} = \mathbf{b}$$for which the solution is$$\mathbf{z} = A^{-1} \mathbf{b}$$In other words, this is the equation that we need to solve by inverting the matrix $A$, which we can do using `numpy.linalg.inv()`. Here's an example:
###Code
import numpy as np
mat = np.array([[-4, 1, 0, 1, 0],
[ 1, -4, 1, 0, 1],
[ 0, 1, -4, 0, 0],
[ 1, 0, 0, -4, 1],
[ 0, 1, 0, 1, -4],])
print(np.linalg.inv(mat))
###Output
[[-0.29353933 -0.08988764 -0.02247191 -0.08426966 -0.04353933]
[-0.08988764 -0.31460674 -0.07865169 -0.04494382 -0.08988764]
[-0.02247191 -0.07865169 -0.26966292 -0.01123596 -0.02247191]
[-0.08426966 -0.04494382 -0.01123596 -0.29213483 -0.08426966]
[-0.04353933 -0.08988764 -0.02247191 -0.08426966 -0.29353933]]
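###Markdown
Explicitly forming the inverse is convenient for a 5 x 5 system like this one, but the same result can be obtained with a linear solver, which is generally the preferred route for larger systems. Below is a minimal sketch using only `numpy`; the right-hand-side vector of ones is a placeholder just to illustrate the call.
###Code
# Solve A x = b directly instead of forming the inverse explicitly
b = np.ones((5, 1))  # placeholder right-hand side, for illustration only
x_inv = np.dot(np.linalg.inv(mat), b)  # inverse-based solution
x_solve = np.linalg.solve(mat, b)  # direct solve of the linear system
print("both approaches agree:", np.allclose(x_inv, x_solve))
###Output
_____no_output_____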
###Markdown
Let's assume for the sake of this example that $U=10^{-4}$ m/yr, $D=10^{-2}$ m$^2$/yr, and all the nodes around the perimeter have zero elevation. What does the solution look like in terms of numbers?
###Code
U = 0.0001
D = 0.01
rhs = -(U / D) + np.zeros((5, 1))
solution = np.dot(np.linalg.inv(mat), rhs) # dot product for matrix-vector multiplication
print(solution)
###Output
[[0.00533708]
[0.00617978]
[0.00404494]
[0.00516854]
[0.00533708]]
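###Markdown
A quick way to confirm that this really is a solution of the linear system (a minimal check, assuming `mat`, `rhs`, and `solution` are still in scope) is to multiply the matrix by the solution vector and compare the result with the right-hand side:
###Code
# The residual A z - b should be numerically zero
residual = np.dot(mat, solution) - rhs
print("maximum residual:", np.abs(residual).max())
print("solution satisfies the system:", np.allclose(np.dot(mat, solution), rhs))
###Output
_____no_output_____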
###Markdown
You can see from this example that once you have your matrix and right-hand side vector, numpy's linear algebra functions make it straightforward to solve the system. The tricky part is building the matrix and the right-side vector in the first place. This is where Landlab's matrix-building utility comes in. Landlab's matrix-building functionsTo facilitate matrix-based numerical solutions, Landlab's collection of utilities includes two helper functions:- `get_core_node_matrix(grid, value, rhs=None)` creates and returns a matrix like $A$ above for a Landlab grid, as well as a right-hand-side vector. The matrix is returned as an M x M numpy array, where M is the number of core nodes in the grid. The right-hand-side vector is an M x 1 array. Each row in the matrix represents one core node. The rules for building a row, and the corresponding row in the right-hand-side vector, are: - For every *active link* connected to the node, if the link connects to another core node, the corresponding column in the matrix is assigned the value $+1$. For example, in the tiny grid presented earlier, core node 0 is connected to two other core nodes, 1 and 3. Therefore, columns 1 and 3 in row 0 of the matrix are each set to $+1$. - The matrix column representing the node itself is assigned a value equal to $-1$ times the number of active links that connect to the node, which represents the number of neighboring nodes that are not closed-boundary nodes. In the example grid above, core node 0 is connected for four active links, and so row 0, column 0 is set to -4. - All other matrix entries are zero. - For every neighboring *fixed-value boundary* node adjacent to core node $i$, the value at the neighbor node is subtracted from column $i$ of the right-hand side vector. This is how a fixed-value boundary condition is handled. In the example grid above, core node 0 is bordered by two fixed-value boundary nodes (node IDs 1 and 5). The values of $z$ at these two fixed-value nodes are subtracted from row 0 of the right-hand-side boundary vector. - `get_core_node_matrix_var_coef(grid, value, coef_at_link=coef, rhs=None)` does basically the same thing, but allows for a spatially variable coefficient ($D$, or its equivalent in your particular problem). In the example above, we assumed that $D$ was constant, and were therefore able to move it to the right side of the equation. But there are plenty of cases where you might want to allow $D$ to vary in space. This function allows that by taking as an input a 1D array containing a value of $D$ for each grid link. The function ensures that $D$ is factored in appropropriately. (Exercise to the reader: use the example above, but with a spatially variable $D$, to work out what "appropriately" means here). Note that when $D$ varies in space, it is included on the left side of the equation (i.e., in the matrix $A$) and **not** in the right-side vector.Both functions return two items: an M x M array (the matrix) and an M x 1 array (for the right-hand-side vector). With both functions, however, it is your job as the user to properly set up your right-hand-side vector. You have two options. The first is to pass in an array as the `rhs` argument. It should be a 1D array of length equal to the number of core nodes. The function will then add the boundary condition information to whatever values you have already put there. The second option is to omit the `rhs` argument. 
In this case the function will create a "preliminary" version that contains **only** the values needed to handle fixed-value boundary conditions; you must then add the rest of your right-side information to this before solving. For example, in the sample problem above, you would need to add $-U/D$ to each element of your right-hand-side vector, while the function would take care of adding the various boundary $z$ values.Both functions take either a `RasterModelGrid` or a `HexModelGrid` as the first argument. The matrix-creation functions work for both grid types. Note however that if you have a hex grid, you must multiply your right-hand-side vector by 3/2 (exercise: modify the derivation above, accounting for the area and side length of a hexagon, to demonstrate why this is the case). In principle, the same finite-volume solution method should work for other grid types too, but with modifications to handle spatial variation in cell area, face width, and link length. (If irregular-grid functionality is something you need for your application, we encourage you to develop it and submit a pull request!)Both functions also take a `value` array containing the node-based values of interest (e.g., $z$ in the sample problem above). This should by a 1D numpy array of length equal to the total number of grid nodes. Examples using Landlab matrix functions Constant coefficientThe example below uses Landlab to solve the tiny sample problem described above.
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import get_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((4, 5), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros('topographic__elevation', at='node')
# Convert one of the interior nodes to boundary
grid.status_at_node[13] = grid.BC_NODE_IS_FIXED_VALUE
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
Matrix:
(0, 0) -4.0
(1, 0) 1.0
(3, 0) 1.0
(0, 1) 1.0
(1, 1) -4.0
(2, 1) 1.0
(4, 1) 1.0
(1, 2) 1.0
(2, 2) -4.0
(0, 3) 1.0
(3, 3) -4.0
(4, 3) 1.0
(1, 4) 1.0
(3, 4) 1.0
(4, 4) -4.0
Right-side vector:
[[-0.01]
[-0.01]
[-0.01]
[-0.01]
[-0.01]]
Solution:
[[ 0.00533708]
[ 0.00617978]
[ 0.00404494]
[ 0.00516854]
[ 0.00533708]]
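###Markdown
We can confirm directly that this matches the hand-built result computed earlier in this notebook. This is a minimal check; it assumes the `solution` array from that earlier example is still in memory.
###Code
# Compare the Landlab-built solution with the hand-built one from earlier
print("matches the hand-built solution:", np.allclose(z_core, solution))
###Output
_____no_output_____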
###Markdown
Note that the solution for our tiny test grid is the same as before, as it should be. Version with a variable coefficientNext, we repeat the above, but for a case of a spatially variable $D$. We'll first do it with an array of $D$ values, one per link, where the $D$ values are the same as above, just to demonstrate that the solution is the same.
###Code
from landlab.utils import get_core_node_matrix
# Define an array of D values
D = 0.01 + np.zeros(grid.number_of_links) # we could also make this a grid field if desired
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z, coef_at_link=D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
Matrix:
(0, 0) -0.04
(1, 0) 0.01
(3, 0) 0.01
(0, 1) 0.01
(1, 1) -0.04
(2, 1) 0.01
(4, 1) 0.01
(1, 2) 0.01
(2, 2) -0.04
(0, 3) 0.01
(3, 3) -0.04
(4, 3) 0.01
(1, 4) 0.01
(3, 4) 0.01
(4, 4) -0.04
Right-side vector:
[[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]]
Solution:
[[ 0.00533708]
[ 0.00617978]
[ 0.00404494]
[ 0.00516854]
[ 0.00533708]]
###Markdown
Here, the matrix and RHS vector are different, but the solution is the same. We've simply factored $D$ into the left side instead of the right side.Now let's try making $D$ actually vary in space. For the sake of illustration, we'll assign a high value to the links on the left, and a 100x lower value to the links on the right. What do you think this will do to the topography?
###Code
# Define an array of D values
D = np.zeros(grid.number_of_links) # we could also make this a grid field if desired
D[grid.x_of_node[grid.node_at_link_head] > 2.0] = 0.001
D[grid.x_of_node[grid.node_at_link_head] <= 2.0] = 0.1
print('D values:')
print(D)
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z, coef_at_link=D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
D values:
[ 0.1 0.1 0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1
0.1 0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1 0.1
0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1 0.1 0.001
0.001]
Matrix:
(0, 0) -0.4
(1, 0) 0.1
(3, 0) 0.1
(0, 1) 0.1
(1, 1) -0.301
(2, 1) 0.001
(4, 1) 0.1
(1, 2) 0.001
(2, 2) -0.004
(0, 3) 0.1
(3, 3) -0.4
(4, 3) 0.1
(1, 4) 0.1
(3, 4) 0.1
(4, 4) -0.301
Right-side vector:
[[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]]
Solution:
[[ 0.00063011]
[ 0.00090356]
[ 0.02522589]
[ 0.00061686]
[ 0.00083735]]
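###Markdown
One way to convince ourselves that this lopsided topography really is a steady state is to recompute the soil flux from the solution and check that its divergence balances the uplift rate at every core node. The cell below is a minimal sketch; it assumes the standard grid methods `calc_grad_at_link` and `calc_flux_div_at_node` and the per-link `D` array defined above.
###Code
# Recompute the flux from the solution and check mass balance at core nodes
q = -D * grid.calc_grad_at_link(z)  # unit soil flux on each link
flux_div = grid.calc_flux_div_at_node(q)  # net outflux per unit area at each node
print("flux divergence at core nodes:", flux_div[grid.core_nodes])
print("should equal U =", U)
###Output
_____no_output_____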
###Markdown
Here the lone core cell on the right is surrounded by links at which transport is inefficient; in other words, $D$ is small. Therefore, the cell needs steep slopes on all sides in order to transport out the incoming soil. The other cells are all bordered by at least one link with a high $D$ value, so they don't need much gradient to transport out the incoming material. Comparison with 1D analytical solutionIn the next example, we'll set up an effectively 1D domain, and compare it with the known analytical solution. We can produce a quasi-1D grid by giving it just 3 rows, two of which are boundary rows, and setting the status of those boundaries to *closed*.The expected analytical solution is a parabola:$$z = \frac{UL^2}{D}\left(\frac{x}{L} - \frac{1}{2}\left[\frac{x}{L}\right]^2\right)$$
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import get_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((3, 101), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros('topographic__elevation', at='node')
# Set closed boundaries on north and south
grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Calculate the analytical solution
middle_row = np.arange(101, 202, dtype=int) # middle row of grid nodes
x = grid.x_of_node[middle_row] # x coordinates: 0, 1, ... 100
L = 50.0 # half-length of domain
za = (U/D) * (x * L - 0.5 * x * x) # analytical solution
# Plot
import matplotlib.pyplot as plt
plt.plot(x, z[middle_row], 'b.')
plt.plot(x, za, 'r')
plt.xlabel('Distance (m)')
plt.ylabel('Height (m)')
plt.legend(['numerical', 'analytical'])
###Output
_____no_output_____
###Markdown
Hexagonal gridOne advantage of the finite-volume method is that it isn't limited to rectilinear grids. The next example demonstrates this with a tiny hex grid. This wee little grid has just two core nodes, so our matrix will be 2x2. One change is that we need to multiply the RHS values by 3/2 to account for the hex geometry.
###Code
from landlab import HexModelGrid
# Instantiate the grid: here 3 rows, with 3 columns top and bottom and 4 in the middle
hg = HexModelGrid((3, 3))
# Add the elevation field
z = hg.add_zeros('topographic__elevation', at='node')
# Constants, as before
U = 0.0001
D = 0.01
dx = 1.0 # this is the spacing between nodes
# Create the matrix and RHS
mat, rhs = get_core_node_matrix(hg, z)
# Fill in the rest of the RHS vector, including a factor of 3/2 for the hex grid.
rhs[:] += -1.5 * U * dx * dx / D
# Solve
soln = np.dot(np.linalg.inv(mat.toarray()), rhs)
z[hg.core_nodes] = soln.flatten()
print(mat)
print(rhs)
print(z)
###Output
(0, 0) -6.0
(1, 0) 1.0
(0, 1) 1.0
(1, 1) -6.0
[[-0.015]
[-0.015]]
[ 0. 0. 0. 0. 0.003 0.003 0. 0. 0. 0. ]
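###Markdown
Before the mass-balance test described next, we can verify that the 2 x 2 system was solved correctly (a minimal check, assuming `mat`, `rhs`, and `soln` are still in scope): the product of the matrix and the solution should reproduce the right-hand-side vector.
###Code
# Residual of the 2 x 2 hex-grid system; should be numerically zero
print("residual:", np.dot(mat.toarray(), soln) - rhs)
###Output
_____no_output_____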
###Markdown
We can test this. The uplift rate times the cell area represents the volume rate in. Because this is a steady problem, it should equal the volume rate out. The volume rate out across any outer cell face is equal to the gradient across the face times $D$ times the width of the face. The face width in this case is $3^{-1/2}$. Here, the boundaries are all at zero and the distance between nodes is unity, so the gradient is equal to the elevation value. Hence, the flux out across any one face is:$$3^{-1/2} Dz$$and the total flux equals the flux of one face times the number of outer faces, of which there happen to be 10. Here's the calculation:
###Code
# Test: area times 2 cells times uplift rate should equal number of exposed sides times elevation
area = 0.5 * 3.0**0.5
influx = 2 * area * U
outflux = 10 * D * (1.0 / 3.0**0.5) * z[4]
print(influx)
print(outflux)
###Output
0.00017320508075688773
0.000173205080757
###Markdown
Building a matrix for numerical methods using a Landlab grid(Greg Tucker, University of Colorado Boulder, July 2020)*This notebook explains how to use the matrix-building functions to construct a matrix for a finite-volume or finite-difference solution on a Landlab grid.* IntroductionNumerical solutions to differential equations often involve matrices. With grid-based numerical models, like those that Landlab is designed to support, the problem is *discretized* in space: we solve for one or more state variables of interest at a series of discrete spatial locations, such as a grid node or the cell that surrounds it. That process of discretization converts a partial differential equation into a set of ordinary differential equations, with one equation per point. Consider, for example, the one-dimensional diffusion equation:$$\frac{\partial \eta}{\partial t} = D\frac{\partial^2 \eta}{\partial x^2}$$where $t$ is time, $x$ is distance, $D$ is a transport coefficient, and $\eta$ could be concentration of a dissolved chemical (classic chemical diffusion), the temperature in a solid (heat diffusion), the velocity of flow in a viscous liquid (viscous momentum diffusion), or the height of the land on a hillslope (soil diffusion). If the domain is discretized such that we seek the value of $\eta$ at a series of discrete points, then the above equation for a given point $i$ becomes:$$\frac{d \eta_i}{d t} = D\frac{d^2 \eta}{d x^2}\bigg\rvert_i$$where the subscript at the right means "evaluated at $i$". Once the right side has been cast in terms of values of $\eta$ at particular points, you end up with a linear system of equations, and matrix methods provide a natural way to solve them. One example among many is an implicit finite-difference solution to the one-dimensional form of the diffusion equation, which involves constructing a matrix and a "right-hand side" vector, inverting the matrix, and multiplying it by the vector to obtain a solution for the state variable at each grid node (for more on that particular example, see *Mathematical Modeling of Earth's Dynamical Systems* by Slingerland and Kump, or *Numerical Recipes* by Press et al.).When using matrix methods to solve a set of equation at discrete points, whether in 2D or 1D (or even 3D), you typically have an $MxM$ matrix, where $M$ is the number of solution points. Each row in the matrix represents the equation for one of the points. If the equation for a given point includes terms that represent, say, two of its immediately neighboring points, then the columns representing those two points contain non-zero entries. More generally, finite-volume and finite-difference matrices tend be sparse, with only a few non-zero entries in each row: the column that represents the point itself, and the columns representing its immediate neighbors.Building the matrix therefore requires knowledge of which points are connected to which other points in the grid. In 1D, this is easy. In 2D, it's a bit more complicated. Fortunately, the structure of a Landlab grid lends itself to this task. In particular, we know the connectivity for the *nodes* in the grid. It also turns out that when nodes are the solution points (as is typical), the number of equations---and thus $M$---corresponds exactly to the number of *core nodes* in the grid.In the following, we first work through the mathematics in a simple example: a finite-volume matrix solution to a steady diffusion equation with a source term, also known as a Poisson equation. 
We then show some worked examples of the Landlab matrix tools in action. Example: steady diffusion with a source termConsider the diffusion model for hillslope evolution in two dimensions. The equation describes the time evolution of land surface height, $z$, given a transport coefficient $D$ $[L^2/T]$, and relative uplift rate $U$ $[L/T]$ as:$$\frac{\partial z}{\partial t} = U - \nabla \cdot (-D \nabla z)$$Here $\nabla z$ is the gradient of $z$, which here is a two-element vector (components in the $x$ and $y$ directions, respectively), and $\nabla\cdot$ is the divergence operator. We'll use a matrix method to solve for $z(x)$ when the time derivative is zero. So the equation we want to solve is:$$U \nabla \cdot (-D \nabla z) = 0$$If $D$ is spatially uniform, we can write this as:$$\boxed{\nabla^2 z = -U/D}$$This is the equation we're going to discretize and solve. Here $\nabla^2$ is understood to be the divergence-of-the-gradient, and in 1D would just be a second derivative:$$\frac{d^2z}{dx^2} = -\frac{U}{D}$$The minus sign is important: it indicates upward convexity of the solution when $U$ and $D$ are positive (which they always are in this case). Finite-volume discretizationLet's take a step back in the derivation of the diffusion equation to note that it is composed of two parts together. One part is mass conservation:$$\frac{\partial z}{\partial t} = U - \nabla \cdot \mathbf{q}$$where $\mathbf{q}$ is soil volume flux per unit width $[L^2/T]$. The other part is the flux law:$$\mathbf{q} = -D\nabla z$$For this example, we'll set the time derivative to zero, meaning we are looking for a steady solution.Next, we integrate the conservation law over a 2D region $R$. In general, $R$ is a simply connected region. Ultimately for us, it will be a grid cell, which could be a square, a rectangle, a hexagon, or even an irregular polygon.$$\int\int_R \nabla\cdot \mathbf{q} dR = \int\int_R U dR$$Because $U$ is constant inside the region $R$,$$\int\int_R \nabla\cdot \mathbf{q} dR = U A_r$$Now we apply Green's theorem, which basically says that an area integral over the divergence of a vector field is equivalent to a line integral of the surface-normal component of that vector around the perimeter of the region. Intuitively, if we consider $\mathbf{q}$ to be a flux in this case, what we're saying is that we can obtain the net total flux over the region (grid cell!) by integrating the flux all around the perimeter. Think of it as keeping track of all the people who enter or leave the perimeter of a playing field.$$\oint_S \mathbf{q} \cdot\mathbf{n} dR = U A_r$$where $\mathbf{n}$ is an (outward-facing) unit vector perpendicular to the perimeter $S$ that encloses region $R$. For us, again the perimeter is just the perimeter of the grid cell: the four sides of a square or rectangle, or the six side of a hexagon, the $N$ sides of a Voronoi polygon, or whatever. Then the line integral becomes a summation.We will define a quantity $q$ that represents the face-normal component of $\mathbf{q}$. The sign convention is as follows:- $q$ is positive if the vector orientation is toward the upper-right half space (including "right" and "up")- $q$ is negative if the vector orientation is toward the lower-left half space (including "left" and "down")We will also define a binary variable $\delta$, which is negative if the outward surface-normal points toward the lower-left half space, and positive if it points toward the upper-right half space.Here's where Landlab grids come into the picture. 
The two definitions represent the use of *links* in a Landlab grid: when $q$ is positive when it oriented in the link's direction, and negative when oriented in the opposite direction. In a simple raster grid, where the links are all horizontal or vertical, the interpretation is very simple: flow to the right (increasing $x$) is positive, and to the left is negative; flow upward (increasing $y$) is positive, and downward is negative.More generally, whatever the grid type, links by convention always "point" toward the upper-right half space; hence the general definition of $q$ above. The variable $\delta$ represents the link orientation relative to the cell: positive when the link points out of the cell, negative when it points into the cell. The variable is represented in a Landlab grid by the array `link_dirs_at_node`: one for each link, starting from the "east" (or "right") direction and going counter-clockwise.Suppose $R$ is a square grid cell of width $\Delta x$. Then:$$\oint_S \mathbf{f} \cdot\mathbf{n} dR = \sum_{k=1}^4 q_k \delta_k \Delta x$$where $q_k$ is the magnitude of the vector field at face $k$, and $\delta = -1$ if the link at face $k$ points inward, and $+1$ if the link points outward.For this Poisson problem (i.e., diffusion with zero time derivative), the flux between the two nodes at either end of a link is approximated as the difference in $z$ divided by the distance, which here is $\Delta x$. For each of the four directions:$q_e = -(D/\Delta x) (z_e - z_i)$$q_n = -(D/\Delta x) (z_n - z_i)$$q_e = -(D/\Delta x) (z_i - z_w)$$q_s = -(D/\Delta x) (z_i - z_s)$Here the subscript refers to the four cardinal directions. When you work out the summation above, you get:$$\sum_{k=1}^4 q_k \delta_k \Delta x = -D (z_e + z_n - + z_w + z_s - 4z_i)$$.Now plug this back into our governing equation, and divide both sides by $A_r = \Delta x^2$:$$-D (z_e + z_n - + z_w + z_s - 4z_i) = U$$or$$\boxed{z_e + z_n - + z_w + z_s - 4z_i = -U/D}$$So the above represents a system of equations: one equation per core node in a Landlab grid. For any given core node, $z_i$ is the elevation of the node itself, and the other four are the elevations of its four neighbors. By the way, for a regular raster grid, this finite-volume setup turns out to be the same as the finite-difference version. Here the directional subscripts will ultimately be replaced with indices of the particular neighboring nodes. Example of a finite-volume setupSuppose we have a raster model grid with 4 rows and 5 columns, so that there are 6 interior nodes. To make it interesting, let's assume that one of the interior nodes is actually a fixed-value boundary. We will also assume that the perimeter nodes are fixed-value boundaries. Fixed-value boundary simply means that we will keep the elevation constant at these nodes. In total, then, there are 5 core nodes at which we wish to solve for $z$. An illustration of the grid, with the lower-left node being node number 0, looks like:`o---o---o---o---o | | | | | o---.---.---o---o | | | | | o---.---.---.---o | | | | | o---o---o---o---o`In the illustration, `.` is a core node, and `o` is a fixed-value boundary node. 
The numbering of *nodes* looks like this:`15---16---17---18---19 | | | | | 10---11---12---13---14 | | | | | 5--- 6--- 7--- 8--- 9 | | | | | 0--- 1--- 2--- 3--- 4`Here's a version where we number the *core nodes* consecutively:`o---o---o---o---o | | | | | o---3---4---o---o | | | | | o---0---1---2---o | | | | | o---o---o---o---o`These numbers correspond to rows in a matrix that we will construct. For each row, the column representing the node itself gets a -4, corresponding to the boxed equation above. For each of its neighboring **core** nodes, the corresponding column gets a +1. For example, the first row in the matrix, representing core node 0 in the above sketch, will have a -4 in column 0. It will have a +1 in column 1, representing the neighbor to its east, and a +1 in column 3, representing the neighbor to its north. Here's what the matrix should look like:\begin{vmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{vmatrix}But what happens when one or more of the four neighbors is not another core node, but rather a fixed-value boundary? That's actually the case for *all* of the core nodes in the above example. To appreciate how this works, recall that we're going to put all the constant terms on the right-hand side of the equation. To write this out, we need a way to notate both core nodes and fixed-value nodes. Here, we'll use a subscript to index by *core node ID* (for the core nodes), and parentheses to index by *node ID* (for the boundary nodes). With that notation in mind, the equations for the example grid above are:\begin{eqnarray}z_1 + z_3 + z(5) + z(1) - 4z_0 = -U/D \\z_2 + z_4 + z_0 + z(2) - 4z_1 = -U/D \\z(9) + z(13) + z_1 + z(3) - 4z_2 = -U/D \\z_4 + z(16) + z(10) + z_0 - 4z_3 = -U/D \\z(13) + z(17) + z_3 + z_1 - 4z_4 = -U/D \\\end{eqnarray}With this notation, it's easy to spot the fixed-value boundary nodes, whose entries we'll move to the right-side:\begin{eqnarray} - 4z_0 + z_1 + z_3 = -U/D - (z(5) + z(1)) \\z_0 - 4z_1 + z_2 + z_4 = -U/D - z(2) \\z_1 - 4z_2 = -U/D - (z(9) + z(13) + z_1 + z(3)) \\z_0 - 4z_3 + z_4 = -U/D - (z(16) + z(10)) \\z_1 + z_3 - 4z_4 = -U/D - (z(13) + z(17)) \\\end{eqnarray}The above set of equations is represented by the following matrix equation:\begin{gather}\begin{bmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{bmatrix}\begin{bmatrix}z_0 \\z_1 \\z_2 \\z_3 \\z_4\end{bmatrix} =\begin{bmatrix}-U/D - (z(5) + z(1)) \\-U/D - z(2) \\-U/D - (z(9) + z(13) + z_1 + z(3)) \\-U/D - (z(16) + z(10)) \\-U/D - (z(13) + z(17))\end{bmatrix}\end{gather}or more succinctly,$$A\mathbf{z} = \mathbf{b}$$for which the solution is$$\mathbf{z} = A^{-1} \mathbf{b}$$In other words this is the equation that we need to solve by inverting the matrix $A$, which we can do using `numpy.linalg.inv()`. Here's an example:
###Code
import numpy as np
mat = np.array([[-4, 1, 0, 1, 0],
[ 1, -4, 1, 0, 1],
[ 0, 1, -4, 0, 0],
[ 1, 0, 0, -4, 1],
[ 0, 1, 0, 1, -4],])
print(np.linalg.inv(mat))
###Output
[[-0.29353933 -0.08988764 -0.02247191 -0.08426966 -0.04353933]
[-0.08988764 -0.31460674 -0.07865169 -0.04494382 -0.08988764]
[-0.02247191 -0.07865169 -0.26966292 -0.01123596 -0.02247191]
[-0.08426966 -0.04494382 -0.01123596 -0.29213483 -0.08426966]
[-0.04353933 -0.08988764 -0.02247191 -0.08426966 -0.29353933]]
###Markdown
Let's assume for the sake of this example that $U=10^{-4}$ m/yr, $D=10^{-2}$ m$^2$/yr, and all the nodes around the perimeter have zero elevation. What does the solution look like in terms of numbers?
###Code
U = 0.0001
D = 0.01
rhs = -(U / D) + np.zeros((5, 1))
solution = np.dot(np.linalg.inv(mat), rhs) # dot product for matrix-vector multiplication
print(solution)
###Output
[[0.00533708]
[0.00617978]
[0.00404494]
[0.00516854]
[0.00533708]]
###Markdown
You can see from this example that once you have your matrix and right-hand side vector, numpy's linear algebra functions make it straightforward to solve the system. The tricky part is building the matrix and the right-side vector in the first place. This is where Landlab's matrix-building utility comes in. Landlab's matrix-building functionsTo facilitate matrix-based numerical solutions, Landlab's collection of utilities includes two helper functions:- `make_core_node_matrix(grid, value, rhs=None)` creates and returns a matrix like $A$ above for a Landlab grid, as well as a right-hand-side vector. The matrix is returned as an M x M numpy array, where M is the number of core nodes in the grid. The right-hand-side vector is an M x 1 array. Each row in the matrix represents one core node. The rules for building a row, and the corresponding row in the right-hand-side vector, are: - For every *active link* connected to the node, if the link connects to another core node, the corresponding column in the matrix is assigned the value $+1$. For example, in the tiny grid presented earlier, core node 0 is connected to two other core nodes, 1 and 3. Therefore, columns 1 and 3 in row 0 of the matrix are each set to $+1$. - The matrix column representing the node itself is assigned a value equal to $-1$ times the number of active links that connect to the node, which represents the number of neighboring nodes that are not closed-boundary nodes. In the example grid above, core node 0 is connected for four active links, and so row 0, column 0 is set to -4. - All other matrix entries are zero. - For every neighboring *fixed-value boundary* node adjacent to core node $i$, the value at the neighbor node is subtracted from column $i$ of the right-hand side vector. This is how a fixed-value boundary condition is handled. In the example grid above, core node 0 is bordered by two fixed-value boundary nodes (node IDs 1 and 5). The values of $z$ at these two fixed-value nodes are subtracted from row 0 of the right-hand-side boundary vector. - `make_core_node_matrix_var_coef(grid, value, coef, rhs=None)` does basically the same thing, but allows for a spatially variable coefficient ($D$, or its equivalent in your particular problem). In the example above, we assumed that $D$ was constant, and were therefore able to move it to the right side of the equation. But there are plenty of cases where you might want to allow $D$ to vary in space. This function allows that by taking as an input a 1D array containing a value of $D$ for each grid link. The function ensures that $D$ is factored in appropropriately. (Exercise to the reader: use the example above, but with a spatially variable $D$, to work out what "appropriately" means here). Note that when $D$ varies in space, it is included on the left side of the equation (i.e., in the matrix $A$) and **not** in the right-side vector.Both functions return two items: an M x M array (the matrix) and an M x 1 array (for the right-hand-side vector). With both functions, however, it is your job as the user to properly set up your right-hand-side vector. You have two options. The first is to pass in an array as the `rhs` argument. It should be a 1D array of length equal to the number of core nodes. The function will then add the boundary condition information to whatever values you have already put there. The second option is to omit the `rhs` argument. 
In this case the function will create a "preliminary" version that contains **only** the values needed to handle fixed-value boundary conditions; you must then add the rest of your right-side information to this before solving. For example, in the sample problem above, you would need to add $-U/D$ to each element of your right-hand-side vector, while the function would take care of adding the various boundary $z$ values.Both functions take either a `RasterModelGrid` or a `HexModelGrid` as the first argument. The matrix-creation functions work for both grid types. Note however that if you have a hex grid, you must multiply your right-hand-side vector by 3/2 (exercise: modify the derivation above, accounting for the area and side length of a hexagon, to demonstrate why this is the case). In principle, the same finite-volume solution method should work for other grid types too, but with modifications to handle spatial variation in cell area, face width, and link length. (If irregular-grid functionality is something you need for your application, we encourage you to develop it and submit a pull request!)Both functions also take a `value` array containing the node-based values of interest (e.g., $z$ in the sample problem above). This should by a 1D numpy array of length equal to the total number of grid nodes. Examples using Landlab matrix functions Constant coefficientThe example below uses Landlab to solve the tiny sample problem described above.
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import make_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((4, 5), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros('topographic__elevation', at='node')
# Convert one of the interior nodes to boundary
grid.status_at_node[13] = grid.BC_NODE_IS_FIXED_VALUE
# Build the matrix and right-hand-side vector
mat, rhs = make_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
Matrix:
[[-4. 1. 0. 1. 0.]
[ 1. -4. 1. 0. 1.]
[ 0. 1. -4. 0. 0.]
[ 1. 0. 0. -4. 1.]
[ 0. 1. 0. 1. -4.]]
Right-side vector:
[[-0.01]
[-0.01]
[-0.01]
[-0.01]
[-0.01]]
Solution:
[[ 0.00533708]
[ 0.00617978]
[ 0.00404494]
[ 0.00516854]
[ 0.00533708]]
###Markdown
Note that the solution for our tiny test grid is the same as before, as it should be. Version with a variable coefficientNext, we repeat the above, but for a case of a spatially variable $D$. We'll first do it with an array of $D$ values, one per link, where the $D$ values are the same as above, just to demonstrate that the solution is the same.
###Code
from landlab.utils import make_core_node_matrix_var_coef
# Define an array of D values
D = 0.01 + np.zeros(grid.number_of_links) # we could also make this a grid field if desired
# Build the matrix and right-hand-side vector
mat, rhs = make_core_node_matrix_var_coef(grid, z, D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
Matrix:
[[-0.04 0.01 0. 0.01 0. ]
[ 0.01 -0.04 0.01 0. 0.01]
[ 0. 0.01 -0.04 0. 0. ]
[ 0.01 0. 0. -0.04 0.01]
[ 0. 0.01 0. 0.01 -0.04]]
Right-side vector:
[[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]]
Solution:
[[ 0.00533708]
[ 0.00617978]
[ 0.00404494]
[ 0.00516854]
[ 0.00533708]]
###Markdown
Here, the matrix and RHS vector are different, but the solution is the same. We've simply factored $D$ into the left side instead of the right side.Now let's try making $D$ actually vary in space. For the sake of illustration, we'll assign a high value to the links on the left, and a 100x lower value to the links on the right. What do you think this will do to the topography?
###Code
# Define an array of D values
D = np.zeros(grid.number_of_links) # we could also make this a grid field if desired
D[grid.x_of_node[grid.node_at_link_head] > 2.0] = 0.001
D[grid.x_of_node[grid.node_at_link_head] <= 2.0] = 0.1
print('D values:')
print(D)
# Build the matrix and right-hand-side vector
mat, rhs = make_core_node_matrix_var_coef(grid, z, D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
D values:
[ 0.1 0.1 0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1
0.1 0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1 0.1
0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1 0.1 0.001
0.001]
Matrix:
[[-0.4 0.1 0. 0.1 0. ]
[ 0.1 -0.301 0.001 0. 0.1 ]
[ 0. 0.001 -0.004 0. 0. ]
[ 0.1 0. 0. -0.4 0.1 ]
[ 0. 0.1 0. 0.1 -0.301]]
Right-side vector:
[[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]]
Solution:
[[ 0.00063011]
[ 0.00090356]
[ 0.02522589]
[ 0.00061686]
[ 0.00083735]]
###Markdown
Here the lone core cell on the right is surrounded by links at which transport is inefficient; in other words, $D$ is small. Therefore, the cell needs steep slopes on all sides in order to transport out the incoming soil. The other cells are all bordered by at least one link with a high $D$ value, so they don't need much gradient to transport out the incoming material. Comparison with 1D analytical solutionIn the next example, we'll set up an effectively 1D domain, and compare it with the known analytical solution. We can produce a quasi-1D grid by giving it just 3 rows, two of which are boundary rows, and setting the status of those boundaries to *closed*.The expected analytical solution is a parabola:$$z = \frac{UL^2}{D}\left(\frac{x}{L} - \frac{1}{2}\left[\frac{x}{L}\right]^2\right)$$
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import make_core_node_matrix, make_core_node_matrix_var_coef
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((3, 101), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros('topographic__elevation', at='node')
# Set closed boundaries on north and south
grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
# Build the matrix and right-hand-side vector
mat, rhs = make_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat), rhs)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Calculate the analytical solution
middle_row = np.arange(101, 202, dtype=int) # middle row of grid nodes
x = grid.x_of_node[middle_row] # x coordinates: 0, 1, ... 100
L = 50.0 # half-length of domain
za = (U/D) * (x * L - 0.5 * x * x) # analytical solution
# Plot
import matplotlib.pyplot as plt
plt.plot(x, z[middle_row], 'b.')
plt.plot(x, za, 'r')
plt.xlabel('Distance (m)')
plt.ylabel('Height (m)')
plt.legend(['numerical', 'analytical'])
###Output
_____no_output_____
###Markdown
Hexagonal gridOne advantage of the finite-volume method is that it isn't limited to rectilinear grids. The next example demonstrates this with a tiny hex grid. This wee little grid has just two core nodes, so our matrix will be 2x2. One change is that we need to multiply the RHS values by 3/2 to account for the hex geometry.
###Code
from landlab import HexModelGrid
# Instantiate the grid: here 3 rows, with 3 columns top and bottom and 4 in the middle
hg = HexModelGrid((3, 3))
# Add the elevation field
z = hg.add_zeros('topographic__elevation', at='node')
# Constants, as before
U = 0.0001
D = 0.01
dx = 1.0 # this is the spacing between nodes
# Create the matrix and RHS
mat, rhs = make_core_node_matrix(hg, z)
# Fill in the rest of the RHS vector, including a factor of 3/2 for the hex grid.
rhs[:] += -1.5*U*dx*dx/D
# Solve
soln = np.dot(np.linalg.inv(mat), rhs)
z[hg.core_nodes] = soln.flatten()
print(mat)
print(rhs)
print(z)
###Output
[[-6. 1.]
[ 1. -6.]]
[[-0.015]
[-0.015]]
[ 0. 0. 0. 0. 0.003 0.003 0. 0. 0. 0. ]
###Markdown
We can test this. The uplift rate times the cell area represents the volume rate in. Because this is a steady problem, it should equal the volume rate out. The volume rate out across any outer cell face is equal to the gradient across the face times $D$ times the width of the face. The face width in this case is $3^{-1/2}$. Here, the boundaries are all at zero and the distance between nodes is unity, so the gradient is equal to the elevation value. Hence, the flux out across any one face is:$$3^{-1/2} Dz$$and the total flux equals the flux of one face times the number of outer faces, of which there happen to be 10. Here's the calculation:
###Code
# Test: area times 2 cells times uplift rate should equal number of exposed sides times elevation
area = 0.5 * 3.0**0.5
influx = 2 * area * U
outflux = 10 * D * (1.0 / 3.0**0.5) * z[4]
print(influx)
print(outflux)
###Output
0.00017320508075688773
0.000173205080757
###Markdown
Building a matrix for numerical methods using a Landlab grid(Greg Tucker, University of Colorado Boulder, July 2020)*This notebook explains how to use the matrix-building functions to construct a matrix for a finite-volume or finite-difference solution on a Landlab grid.* IntroductionNumerical solutions to differential equations often involve matrices. With grid-based numerical models, like those that Landlab is designed to support, the problem is *discretized* in space: we solve for one or more state variables of interest at a series of discrete spatial locations, such as a grid node or the cell that surrounds it. That process of discretization converts a partial differential equation into a set of ordinary differential equations, with one equation per point. Consider, for example, the one-dimensional diffusion equation:$$\frac{\partial \eta}{\partial t} = D\frac{\partial^2 \eta}{\partial x^2}$$where $t$ is time, $x$ is distance, $D$ is a transport coefficient, and $\eta$ could be concentration of a dissolved chemical (classic chemical diffusion), the temperature in a solid (heat diffusion), the velocity of flow in a viscous liquid (viscous momentum diffusion), or the height of the land on a hillslope (soil diffusion). If the domain is discretized such that we seek the value of $\eta$ at a series of discrete points, then the above equation for a given point $i$ becomes:$$\frac{d \eta_i}{d t} = D\frac{d^2 \eta}{d x^2}\bigg\rvert_i$$where the subscript at the right means "evaluated at $i$". Once the right side has been cast in terms of values of $\eta$ at particular points, you end up with a linear system of equations, and matrix methods provide a natural way to solve them. One example among many is an implicit finite-difference solution to the one-dimensional form of the diffusion equation, which involves constructing a matrix and a "right-hand side" vector, inverting the matrix, and multiplying it by the vector to obtain a solution for the state variable at each grid node (for more on that particular example, see *Mathematical Modeling of Earth's Dynamical Systems* by Slingerland and Kump, or *Numerical Recipes* by Press et al.).When using matrix methods to solve a set of equation at discrete points, whether in 2D or 1D (or even 3D), you typically have an $MxM$ matrix, where $M$ is the number of solution points. Each row in the matrix represents the equation for one of the points. If the equation for a given point includes terms that represent, say, two of its immediately neighboring points, then the columns representing those two points contain non-zero entries. More generally, finite-volume and finite-difference matrices tend be sparse, with only a few non-zero entries in each row: the column that represents the point itself, and the columns representing its immediate neighbors.Building the matrix therefore requires knowledge of which points are connected to which other points in the grid. In 1D, this is easy. In 2D, it's a bit more complicated. Fortunately, the structure of a Landlab grid lends itself to this task. In particular, we know the connectivity for the *nodes* in the grid. It also turns out that when nodes are the solution points (as is typical), the number of equations---and thus $M$---corresponds exactly to the number of *core nodes* in the grid.In the following, we first work through the mathematics in a simple example: a finite-volume matrix solution to a steady diffusion equation with a source term, also known as a Poisson equation. 
We then show some worked examples of the Landlab matrix tools in action. Example: steady diffusion with a source termConsider the diffusion model for hillslope evolution in two dimensions. The equation describes the time evolution of land surface height, $z$, given a transport coefficient $D$ $[L^2/T]$, and relative uplift rate $U$ $[L/T]$ as:$$\frac{\partial z}{\partial t} = U - \nabla \cdot (-D \nabla z)$$Here $\nabla z$ is the gradient of $z$, which in 2D is a two-element vector (components in the $x$ and $y$ directions, respectively), and $\nabla\cdot$ is the divergence operator. We'll use a matrix method to solve for $z(x,y)$ when the time derivative is zero. So the equation we want to solve is:$$U - \nabla \cdot (-D \nabla z) = 0$$If $D$ is spatially uniform, we can write this as:$$\boxed{\nabla^2 z = -U/D}$$This is the equation we're going to discretize and solve. Here $\nabla^2$ is understood to be the divergence-of-the-gradient, and in 1D would just be a second derivative:$$\frac{d^2z}{dx^2} = -\frac{U}{D}$$The minus sign is important: it indicates upward convexity of the solution when $U$ and $D$ are positive (which they always are in this case). Finite-volume discretizationLet's take a step back in the derivation of the diffusion equation to note that it is composed of two parts. One part is mass conservation:$$\frac{\partial z}{\partial t} = U - \nabla \cdot \mathbf{q}$$where $\mathbf{q}$ is soil volume flux per unit width $[L^2/T]$. The other part is the flux law:$$\mathbf{q} = -D\nabla z$$For this example, we'll set the time derivative to zero, meaning we are looking for a steady solution.Next, we integrate the conservation law over a 2D region $R$. In general, $R$ is a simply connected region. Ultimately for us, it will be a grid cell, which could be a square, a rectangle, a hexagon, or even an irregular polygon.$$\int\int_R \nabla\cdot \mathbf{q} dR = \int\int_R U dR$$Because $U$ is constant inside the region $R$,$$\int\int_R \nabla\cdot \mathbf{q} dR = U A_r$$Now we apply Green's theorem, which basically says that an area integral over the divergence of a vector field is equivalent to a line integral of the surface-normal component of that vector around the perimeter of the region. Intuitively, if we consider $\mathbf{q}$ to be a flux in this case, what we're saying is that we can obtain the net total flux over the region (grid cell!) by integrating the flux all around the perimeter. Think of it as keeping track of all the people who enter or leave the perimeter of a playing field.$$\oint_S \mathbf{q} \cdot\mathbf{n} dS = U A_r$$where $\mathbf{n}$ is an (outward-facing) unit vector perpendicular to the perimeter $S$ that encloses region $R$. For us, again the perimeter is just the perimeter of the grid cell: the four sides of a square or rectangle, or the six sides of a hexagon, the $N$ sides of a Voronoi polygon, or whatever. Then the line integral becomes a summation.We will define a quantity $q$ that represents the face-normal component of $\mathbf{q}$. The sign convention is as follows:- $q$ is positive if the vector orientation is toward the upper-right half space (including "right" and "up")- $q$ is negative if the vector orientation is toward the lower-left half space (including "left" and "down")We will also define a binary variable $\delta$, which is negative if the outward surface-normal points toward the lower-left half space, and positive if it points toward the upper-right half space.Here's where Landlab grids come into the picture. 
The two definitions represent the use of *links* in a Landlab grid: $q$ is positive when it is oriented in the link's direction, and negative when oriented in the opposite direction. In a simple raster grid, where the links are all horizontal or vertical, the interpretation is very simple: flow to the right (increasing $x$) is positive, and to the left is negative; flow upward (increasing $y$) is positive, and downward is negative.More generally, whatever the grid type, links by convention always "point" toward the upper-right half space; hence the general definition of $q$ above. The variable $\delta$ represents the link orientation relative to the cell: positive when the link points out of the cell, negative when it points into the cell. The variable is represented in a Landlab grid by the array `link_dirs_at_node`: one for each link, starting from the "east" (or "right") direction and going counter-clockwise.Suppose $R$ is a square grid cell of width $\Delta x$. Then:$$\oint_S \mathbf{q} \cdot\mathbf{n} dS = \sum_{k=1}^4 q_k \delta_k \Delta x$$where $q_k$ is the magnitude of the vector field at face $k$, and $\delta = -1$ if the link at face $k$ points inward, and $+1$ if the link points outward.For this Poisson problem (i.e., diffusion with zero time derivative), the flux between the two nodes at either end of a link is approximated as the difference in $z$ divided by the distance, which here is $\Delta x$. For each of the four directions:$$q_e = -(D/\Delta x) (z_e - z_i)$$$$q_n = -(D/\Delta x) (z_n - z_i)$$$$q_w = -(D/\Delta x) (z_i - z_w)$$$$q_s = -(D/\Delta x) (z_i - z_s)$$Here the subscript refers to the four cardinal directions. When you work out the summation above, you get:$$\sum_{k=1}^4 q_k \delta_k \Delta x = -D (z_e + z_n + z_w + z_s - 4z_i)$$Now plug this back into our governing equation, and divide both sides by $A_r = \Delta x^2$:$$-\frac{D}{\Delta x^2} (z_e + z_n + z_w + z_s - 4z_i) = U$$or$$\boxed{z_e + z_n + z_w + z_s - 4z_i = -\frac{U \Delta x^2}{D}}$$(In the examples below the node spacing is $\Delta x = 1$, so the right side is simply $-U/D$.)So the above represents a system of equations: one equation per core node in a Landlab grid. For any given core node, $z_i$ is the elevation of the node itself, and the other four are the elevations of its four neighbors. By the way, for a regular raster grid, this finite-volume setup turns out to be the same as the finite-difference version. Here the directional subscripts will ultimately be replaced with indices of the particular neighboring nodes. Example of a finite-volume setupSuppose we have a raster model grid with 4 rows and 5 columns, so that there are 6 interior nodes. To make it interesting, let's assume that one of the interior nodes is actually a fixed-value boundary. We will also assume that the perimeter nodes are fixed-value boundaries. Fixed-value boundary simply means that we will keep the elevation constant at these nodes. In total, then, there are 5 core nodes at which we wish to solve for $z$. An illustration of the grid, with the lower-left node being node number 0, looks like:`o---o---o---o---o | | | | | o---.---.---o---o | | | | | o---.---.---.---o | | | | | o---o---o---o---o`In the illustration, `.` is a core node, and `o` is a fixed-value boundary node. 
The numbering of *nodes* looks like this:`15---16---17---18---19 | | | | | 10---11---12---13---14 | | | | | 5--- 6--- 7--- 8--- 9 | | | | | 0--- 1--- 2--- 3--- 4`Here's a version where we number the *core nodes* consecutively:`o---o---o---o---o | | | | | o---3---4---o---o | | | | | o---0---1---2---o | | | | | o---o---o---o---o`These numbers correspond to rows in a matrix that we will construct. For each row, the column representing the node itself gets a -4, corresponding to the boxed equation above. For each of its neighboring **core** nodes, the corresponding column gets a +1. For example, the first row in the matrix, representing core node 0 in the above sketch, will have a -4 in column 0. It will have a +1 in column 1, representing the neighbor to its east, and a +1 in column 3, representing the neighbor to its north. Here's what the matrix should look like:\begin{bmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{bmatrix}But what happens when one or more of the four neighbors is not another core node, but rather a fixed-value boundary? That's actually the case for *all* of the core nodes in the above example. To appreciate how this works, recall that we're going to put all the constant terms on the right-hand side of the equation. To write this out, we need a way to notate both core nodes and fixed-value nodes. Here, we'll use a subscript to index by *core node ID* (for the core nodes), and parentheses to index by *node ID* (for the boundary nodes). With that notation in mind, the equations for the example grid above are:\begin{eqnarray}z_1 + z_3 + z(5) + z(1) - 4z_0 = -U/D \\z_2 + z_4 + z_0 + z(2) - 4z_1 = -U/D \\z(9) + z(13) + z_1 + z(3) - 4z_2 = -U/D \\z_4 + z(16) + z(10) + z_0 - 4z_3 = -U/D \\z(13) + z(17) + z_3 + z_1 - 4z_4 = -U/D \\\end{eqnarray}With this notation, it's easy to spot the fixed-value boundary nodes, whose entries we'll move to the right side:\begin{eqnarray} - 4z_0 + z_1 + z_3 = -U/D - (z(5) + z(1)) \\z_0 - 4z_1 + z_2 + z_4 = -U/D - z(2) \\z_1 - 4z_2 = -U/D - (z(9) + z(13) + z(3)) \\z_0 - 4z_3 + z_4 = -U/D - (z(16) + z(10)) \\z_1 + z_3 - 4z_4 = -U/D - (z(13) + z(17)) \\\end{eqnarray}The above set of equations is represented by the following matrix equation:\begin{gather}\begin{bmatrix}-4 & 1 & 0 & 1 & 0 \\ 1 & -4 & 1 & 0 & 1 \\ 0 & 1 & -4 & 0 & 0 \\ 1 & 0 & 0 & -4 & 1 \\ 0 & 1 & 0 & 1 & -4 \\\end{bmatrix}\begin{bmatrix}z_0 \\z_1 \\z_2 \\z_3 \\z_4\end{bmatrix} =\begin{bmatrix}-U/D - (z(5) + z(1)) \\-U/D - z(2) \\-U/D - (z(9) + z(13) + z(3)) \\-U/D - (z(16) + z(10)) \\-U/D - (z(13) + z(17))\end{bmatrix}\end{gather}or more succinctly,$$A\mathbf{z} = \mathbf{b}$$for which the solution is$$\mathbf{z} = A^{-1} \mathbf{b}$$In other words, this is the equation that we need to solve by inverting the matrix $A$, which we can do using `numpy.linalg.inv()`. Here's an example:
###Code
import numpy as np
mat = np.array([[-4, 1, 0, 1, 0],
[ 1, -4, 1, 0, 1],
[ 0, 1, -4, 0, 0],
[ 1, 0, 0, -4, 1],
[ 0, 1, 0, 1, -4],])
print(np.linalg.inv(mat))
###Output
[[-0.29353933 -0.08988764 -0.02247191 -0.08426966 -0.04353933]
[-0.08988764 -0.31460674 -0.07865169 -0.04494382 -0.08988764]
[-0.02247191 -0.07865169 -0.26966292 -0.01123596 -0.02247191]
[-0.08426966 -0.04494382 -0.01123596 -0.29213483 -0.08426966]
[-0.04353933 -0.08988764 -0.02247191 -0.08426966 -0.29353933]]
###Markdown
Let's assume for the sake of this example that $U=10^{-4}$ m/yr, $D=10^{-2}$ m$^2$/yr, and all the nodes around the perimeter have zero elevation. What does the solution look like in terms of numbers?
###Code
U = 0.0001
D = 0.01
rhs = -(U / D) + np.zeros((5, 1))
solution = np.dot(np.linalg.inv(mat), rhs) # dot product for matrix-vector multiplication
print(solution)
###Output
[[0.00533708]
[0.00617978]
[0.00404494]
[0.00516854]
[0.00533708]]
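###Markdown
As an aside, explicitly forming the inverse is fine for a matrix this small, but `numpy.linalg.solve` solves $A\mathbf{z}=\mathbf{b}$ directly and is generally the more efficient and numerically better-behaved choice; it gives the same result here.
###Code
# Same solution, computed without explicitly forming the matrix inverse
print(np.linalg.solve(mat, rhs))
###Output
_____no_output_____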
###Markdown
You can see from this example that once you have your matrix and right-hand side vector, numpy's linear algebra functions make it straightforward to solve the system. The tricky part is building the matrix and the right-side vector in the first place. This is where Landlab's matrix-building utility comes in. Landlab's matrix-building functionsTo facilitate matrix-based numerical solutions, Landlab's collection of utilities includes two helper functions:- `get_core_node_matrix(grid, value, rhs=None)` creates and returns a matrix like $A$ above for a Landlab grid, as well as a right-hand-side vector. The matrix is returned as an M x M sparse matrix, where M is the number of core nodes in the grid (the examples below convert it to a dense array with `.toarray()` before inverting). The right-hand-side vector is an M x 1 array. Each row in the matrix represents one core node. The rules for building a row, and the corresponding row in the right-hand-side vector, are: - For every *active link* connected to the node, if the link connects to another core node, the corresponding column in the matrix is assigned the value $+1$. For example, in the tiny grid presented earlier, core node 0 is connected to two other core nodes, 1 and 3. Therefore, columns 1 and 3 in row 0 of the matrix are each set to $+1$. - The matrix column representing the node itself is assigned a value equal to $-1$ times the number of active links that connect to the node, which represents the number of neighboring nodes that are not closed-boundary nodes. In the example grid above, core node 0 is connected to four active links, and so row 0, column 0 is set to -4. - All other matrix entries are zero. - For every neighboring *fixed-value boundary* node adjacent to core node $i$, the value at the neighbor node is subtracted from element $i$ of the right-hand side vector. This is how a fixed-value boundary condition is handled. In the example grid above, core node 0 is bordered by two fixed-value boundary nodes (node IDs 1 and 5). The values of $z$ at these two fixed-value nodes are subtracted from row 0 of the right-hand-side boundary vector. - `get_core_node_matrix_var_coef(grid, value, coef_at_link=coef, rhs=None)` does basically the same thing, but allows for a spatially variable coefficient ($D$, or its equivalent in your particular problem). (The worked examples below pass the coefficient array directly to `get_core_node_matrix` via its `coef_at_link` keyword, which accomplishes the same thing.) In the example above, we assumed that $D$ was constant, and were therefore able to move it to the right side of the equation. But there are plenty of cases where you might want to allow $D$ to vary in space. This function allows that by taking as an input a 1D array containing a value of $D$ for each grid link. The function ensures that $D$ is factored in appropriately. (Exercise to the reader: use the example above, but with a spatially variable $D$, to work out what "appropriately" means here). Note that when $D$ varies in space, it is included on the left side of the equation (i.e., in the matrix $A$) and **not** in the right-side vector.Both functions return two items: an M x M array (the matrix) and an M x 1 array (for the right-hand-side vector). With both functions, however, it is your job as the user to properly set up your right-hand-side vector. You have two options. The first is to pass in an array as the `rhs` argument. It should be a 1D array of length equal to the number of core nodes. The function will then add the boundary condition information to whatever values you have already put there. The second option is to omit the `rhs` argument. 
In this case the function will create a "preliminary" version that contains **only** the values needed to handle fixed-value boundary conditions; you must then add the rest of your right-side information to this before solving. For example, in the sample problem above, you would need to add $-U/D$ to each element of your right-hand-side vector, while the function would take care of adding the various boundary $z$ values.Both functions take either a `RasterModelGrid` or a `HexModelGrid` as the first argument. The matrix-creation functions work for both grid types. Note however that if you have a hex grid, you must multiply your right-hand-side vector by 3/2 (exercise: modify the derivation above, accounting for the area and side length of a hexagon, to demonstrate why this is the case). In principle, the same finite-volume solution method should work for other grid types too, but with modifications to handle spatial variation in cell area, face width, and link length. (If irregular-grid functionality is something you need for your application, we encourage you to develop it and submit a pull request!)Both functions also take a `value` array containing the node-based values of interest (e.g., $z$ in the sample problem above). This should be a 1D numpy array of length equal to the total number of grid nodes. Examples using Landlab matrix functions Constant coefficientThe example below uses Landlab to solve the tiny sample problem described above.
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import get_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((4, 5), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros('topographic__elevation', at='node')
# Convert one of the interior nodes to boundary
grid.status_at_node[13] = grid.BC_NODE_IS_FIXED_VALUE
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
Matrix:
(0, 0) -4.0
(1, 0) 1.0
(3, 0) 1.0
(0, 1) 1.0
(1, 1) -4.0
(2, 1) 1.0
(4, 1) 1.0
(1, 2) 1.0
(2, 2) -4.0
(0, 3) 1.0
(3, 3) -4.0
(4, 3) 1.0
(1, 4) 1.0
(3, 4) 1.0
(4, 4) -4.0
Right-side vector:
[[-0.01]
[-0.01]
[-0.01]
[-0.01]
[-0.01]]
Solution:
[[ 0.00533708]
[ 0.00617978]
[ 0.00404494]
[ 0.00516854]
[ 0.00533708]]
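###Markdown
Before moving on, here is a minimal sketch of the other right-hand-side option described above: pre-filling an array with the $-U/D$ source term and passing it in through the `rhs` keyword. This assumes the keyword behaves as described (adding the boundary-condition contributions to whatever values are already present); the names `my_rhs`, `mat2`, and `rhs2` are just illustrative. If so, it should reproduce the same solution as the cell above.
###Code
# Sketch: pre-fill the right-hand side and let the function add the boundary terms
my_rhs = -U / D + np.zeros(grid.number_of_core_nodes)
mat2, rhs2 = get_core_node_matrix(grid, z, rhs=my_rhs)
print(np.dot(np.linalg.inv(mat2.toarray()), rhs2))
###Output
_____no_output_____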
###Markdown
Note that the solution for our tiny test grid is the same as before, as it should be. Version with a variable coefficientNext, we repeat the above, but for a case of a spatially variable $D$. We'll first do it with an array of $D$ values, one per link, where the $D$ values are the same as above, just to demonstrate that the solution is the same.
###Code
from landlab.utils import get_core_node_matrix
# Define an array of D values
D = 0.01 + np.zeros(grid.number_of_links) # we could also make this a grid field if desired
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z, coef_at_link=D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
Matrix:
(0, 0) -0.04
(1, 0) 0.01
(3, 0) 0.01
(0, 1) 0.01
(1, 1) -0.04
(2, 1) 0.01
(4, 1) 0.01
(1, 2) 0.01
(2, 2) -0.04
(0, 3) 0.01
(3, 3) -0.04
(4, 3) 0.01
(1, 4) 0.01
(3, 4) 0.01
(4, 4) -0.04
Right-side vector:
[[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]]
Solution:
[[ 0.00533708]
[ 0.00617978]
[ 0.00404494]
[ 0.00516854]
[ 0.00533708]]
###Markdown
Here, the matrix and RHS vector are different, but the solution is the same. We've simply factored $D$ into the left side instead of the right side.Now let's try making $D$ actually vary in space. For the sake of illustration, we'll assign a high value to the links on the left, and a 100x lower value to the links on the right. What do you think this will do to the topography?
###Code
# Define an array of D values
D = np.zeros(grid.number_of_links) # we could also make this a grid field if desired
D[grid.x_of_node[grid.node_at_link_head] > 2.0] = 0.001
D[grid.x_of_node[grid.node_at_link_head] <= 2.0] = 0.1
print('D values:')
print(D)
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z, coef_at_link=D)
# Add the correct data to the right-hand-side vector: this time D is on the left side, so
# we don't incorporate it in the right-side vector
rhs -= U
# Let's take a look at them
print('Matrix:')
print(mat)
print('Right-side vector:')
print(rhs)
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
print('Solution:')
print(z_core)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Plot
imshow_grid(grid, z)
###Output
D values:
[ 0.1 0.1 0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1
0.1 0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1 0.1
0.001 0.001 0.1 0.1 0.1 0.001 0.001 0.1 0.1 0.001
0.001]
Matrix:
(0, 0) -0.4
(1, 0) 0.1
(3, 0) 0.1
(0, 1) 0.1
(1, 1) -0.301
(2, 1) 0.001
(4, 1) 0.1
(1, 2) 0.001
(2, 2) -0.004
(0, 3) 0.1
(3, 3) -0.4
(4, 3) 0.1
(1, 4) 0.1
(3, 4) 0.1
(4, 4) -0.301
Right-side vector:
[[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]
[-0.0001]]
Solution:
[[ 0.00063011]
[ 0.00090356]
[ 0.02522589]
[ 0.00061686]
[ 0.00083735]]
###Markdown
Here the lone core cell on the right is surrounded by links at which transport is inefficient; in other words, $D$ is small. Therefore, the cell needs steep slopes on all sides in order to transport out the incoming soil. The other cells are all bordered by at least one link with a high $D$ value, so they don't need much gradient to transport out the incoming material. Comparison with 1D analytical solutionIn the next example, we'll set up an effectively 1D domain, and compare it with the known analytical solution. We can produce a quasi-1D grid by giving it just 3 rows, two of which are boundary rows, and setting the status of those boundaries to *closed*.The expected analytical solution is a parabola:$$z = \frac{UL^2}{D}\left(\frac{x}{L} - \frac{1}{2}\left[\frac{x}{L}\right]^2\right)$$
###Code
from landlab import RasterModelGrid, imshow_grid
from landlab.utils import get_core_node_matrix
import numpy as np
# Define parameter values
U = 0.0001 # uplift rate of material, relative to baselevel, m/yr
D = 0.01 # soil transport coefficient ("diffusivity"), m2/yr
# Create a simple grid
grid = RasterModelGrid((3, 101), xy_spacing=1.0)
# Add a field for topographic elevation
z = grid.add_zeros('topographic__elevation', at='node')
# Set closed boundaries on north and south
grid.set_closed_boundaries_at_grid_edges(False, True, False, True)
# Build the matrix and right-hand-side vector
mat, rhs = get_core_node_matrix(grid, z)
# Add the correct data to the right-hand-side vector
rhs -= U / D
# Solve: invert the matrix using numpy's linalg.inv() function, then take dot product
z_core = np.dot(np.linalg.inv(mat.toarray()), rhs)
# Insert the solution into the elevation field
z[grid.core_nodes] = z_core.flatten() # flatten because z is a 1D array
# Calculate the analytical solution
middle_row = np.arange(101, 202, dtype=int) # middle row of grid nodes
x = grid.x_of_node[middle_row] # x coordinates: 0, 1, ... 100
L = 50.0 # half-length of domain
za = (U/D) * (x * L - 0.5 * x * x) # analytical solution
# Plot
import matplotlib.pyplot as plt
plt.plot(x, z[middle_row], 'b.')
plt.plot(x, za, 'r')
plt.xlabel('Distance (m)')
plt.ylabel('Height (m)')
plt.legend(['numerical', 'analytical'])
###Output
_____no_output_____
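###Markdown
A quick way to quantify the agreement shown in the plot is the largest absolute difference between the numerical and analytical profiles (a small added check; the exact value depends on the grid spacing and boundary treatment).
###Code
# Maximum absolute difference between the numerical and analytical solutions
print(np.max(np.abs(z[middle_row] - za)))
###Output
_____no_output_____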
###Markdown
Hexagonal gridOne advantage of the finite-volume method is that it isn't limited to rectilinear grids. The next example demonstrates this with a tiny hex grid. This wee little grid has just two core nodes, so our matrix will be 2x2. One change is that we need to multiply the RHS values by 3/2 to account for the hex geometry.
###Code
from landlab import HexModelGrid
# Instantiate the grid: here 3 rows, with 3 columns top and bottom and 4 in the middle
hg = HexModelGrid((3, 3))
# Add the elevation field
z = hg.add_zeros('topographic__elevation', at='node')
# Constants, as before
U = 0.0001
D = 0.01
dx = 1.0 # this is the spacing between nodes
# Create the matrix and RHS
mat, rhs = get_core_node_matrix(hg, z)
# Fill in the rest of the RHS vector, including a factor of 3/2 for the hex grid.
rhs[:] += -1.5 * U * dx * dx / D
# Solve
soln = np.dot(np.linalg.inv(mat.toarray()), rhs)
z[hg.core_nodes] = soln.flatten()
print(mat)
print(rhs)
print(z)
###Output
(0, 0) -6.0
(1, 0) 1.0
(0, 1) 1.0
(1, 1) -6.0
[[-0.015]
[-0.015]]
[ 0. 0. 0. 0. 0.003 0.003 0. 0. 0. 0. ]
###Markdown
We can test this. The uplift rate times the cell area represents the volume rate in. Because this is a steady problem, it should equal the volume rate out. The volume rate out across any outer cell face is equal to the gradient across the face times $D$ times the width of the face. The face width in this case is $3^{-1/2}$. Here, the boundaries are all at zero and the distance between nodes is unity, so the gradient is equal to the elevation value. Hence, the flux out across any one face is:$$3^{-1/2} Dz$$and the total flux equals the flux of one face times the number of outer faces, of which there happen to be 10. Here's the calculation:
###Code
# Test: area times 2 cells times uplift rate should equal number of exposed sides times elevation
area = 0.5 * 3.0**0.5
influx = 2 * area * U
outflux = 10 * D * (1.0 / 3.0**0.5) * z[4]
print(influx)
print(outflux)
###Output
0.00017320508075688773
0.000173205080757
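###Markdown
As a final check (added for completeness), the two numbers above should agree to within floating-point round-off.
###Code
# Verify that the influx and outflux printed above match to numerical precision
print(np.isclose(influx, outflux))
###Output
_____no_output_____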
|
20181001_kaggle_salt_upload.ipynb | ###Markdown
[View in Colaboratory](https://colab.research.google.com/github/kentaojapi/dataanalysis_tgs-salt/blob/master/20181001_kaggle_salt_upload.ipynb)
###Code
# download API key from google drive
## Original: https://colab.research.google.com/drive/1eufc8aNCdjHbrBhuy7M7X6BGyzAyRbrF#scrollTo=y5_288BYp6H1
## When you run for the first time, you will see a link to authenticate.
from googleapiclient.discovery import build
import io, os
from googleapiclient.http import MediaIoBaseDownload
from google.colab import auth
auth.authenticate_user()
drive_service = build('drive', 'v3')
results = drive_service.files().list(
q="name = 'kaggle.json'", fields="files(id)").execute()
kaggle_api_key = results.get('files', [])
filename = "/content/.kaggle/kaggle.json"
os.makedirs(os.path.dirname(filename), exist_ok=True)
request = drive_service.files().get_media(fileId=kaggle_api_key[0]['id'])
fh = io.FileIO(filename, 'wb')
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download %d%%." % int(status.progress() * 100))
os.chmod(filename, 0o600)  # octal permissions: owner read/write only, as required by the Kaggle API
!pip install kaggle
!mkdir ~/.kaggle
!cp /content/.kaggle/kaggle.json ~/.kaggle/kaggle.json
!kaggle competitions download -c tgs-salt-identification-challenge
!unzip train.zip
!mv images/ train/
!unzip test.zip
!mv images/ test/
!rm train.zip
!rm test.zip
!df -h
from IPython.display import clear_output
clear_output()
import os
import sys
import random
import warnings
import pandas as pd
import numpy as np
from matplotlib import pyplot
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
import cv2
from tqdm import tqdm_notebook, tnrange, trange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input, Dense, Dropout, Activation, MaxPooling2D, Flatten
from keras.layers.convolutional import Conv2D, UpSampling2D
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.optimizers import Adam, SGD
from keras import losses
from keras.preprocessing import image
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from google.colab import files
from jinja2 import Template
im_width = 128
im_height = 128
im_chan = 1
path_train = '../content/train/'
path_test = '../content/test/'
ids= ['1f1cc6b3a4','5b7c160d0d','6c40978ddf','7dfdf6eeb8','7e5a6e5013']
plt.figure(figsize=(20,10))
for j, img_name in enumerate(ids):
q = j+1
img = load_img(path_train + img_name + '.png')
img_mask = load_img('../content/masks/' + img_name + '.png')
plt.subplot(1,2*(1+len(ids)),q*2-1)
plt.imshow(img)
plt.subplot(1,2*(1+len(ids)),q*2)
plt.imshow(img_mask)
plt.show()
df_depth = pd.read_csv('../content/depths.csv')
train_ids = next(os.walk(path_train))[2]
test_ids = next(os.walk(path_test))[2]
df_train_depth = df_depth.query('id in {0}'.format(list(map(lambda x: x.split('.')[0], train_ids))))
df_train_depth = df_train_depth.sort_values(by=['z']).reset_index(drop=True)
df_test_depth = df_depth.query('id in {0}'.format(list(map(lambda x: x.split('.')[0], test_ids))))
df_test_depth = df_test_depth.sort_values(by=['z']).reset_index(drop=True)
X_train = np.zeros((len(train_ids), im_height, im_width, im_chan), dtype=np.uint8)
y_train = np.zeros((len(train_ids), im_height, im_width, 1), dtype=np.uint8)
print('Getting and resizing train images and masks ...')
sys.stdout.flush()
for n, id_ in enumerate(train_ids):
img = load_img(path_train + id_)
x = img_to_array(img)[:, :, 1]
x = resize(x, (128, 128, 1), mode='constant', preserve_range=True)
X_train[n] = x
mask = img_to_array(load_img('../content/masks/' + id_))[:, :, 1]
y = resize(mask, (128, 128, 1), mode='constant', preserve_range=True)
y_train[n] = y
print('Done')
X_test = np.zeros((len(test_ids), im_height, im_width, im_chan), dtype=np.uint8)
sizes_test = []
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in enumerate(test_ids):
img = load_img(path_test + id_)
x = img_to_array(img)[:, :, 1]
sizes_test.append([x.shape[0], x.shape[1]])
x = resize(x, (128, 128, 1), mode='constant', preserve_range=True)
X_test[n] = x
print('Done!')
print("X_train.shape : ", X_train.shape)
print("X_test.shape : ", X_test.shape)
# reshape train(4000, 128, 128, 1) to (4000, )
X_train_norm = X_train.reshape(4000, 128*128).sum(axis=1) / 255
y_train_norm = y_train.reshape(4000, 128*128).sum(axis=1) / 255
df_train_salt_norm = pd.DataFrame({
'img': X_train_norm,
'mask': y_train_norm,
'depth': df_train_depth.z,
'id': df_train_depth.id
})
# reshape test(18000, 128, 128, 1) to (18000, )
X_test_norm = X_test.reshape(18000, 128*128).sum(axis=1) / 255
df_test_salt_norm = pd.DataFrame({
'img': X_test_norm,
'depth': df_test_depth.z,
'id': df_test_depth.id
})
df_train_salt_norm.head()
fig , ax = plt.subplots(nrows=2, ncols=2)
plt.subplots_adjust(hspace=1, wspace=0.5)
df_train_salt_norm.query('mask < 200')['mask'].hist(bins=30, ax=ax[0][0])
ax[0][0].set_title('histogram of mask' )
df_train_salt_norm.plot(kind='scatter', x='img', y='mask', ax=ax[0][1])
ax[0][1].set_title('img mask')
df_train_salt_norm.plot(kind='scatter', x='depth', y='img', ax=ax[1][0])
ax[1][0].set_title('depth img')
df_train_salt_norm.plot(kind='scatter', x='depth', y='mask', ax=ax[1][1])
ax[1][1].set_title('depth mask')
fig , ax = plt.subplots(nrows=2, ncols=2)
plt.subplots_adjust(hspace=1, wspace=0.5)
df_train_salt_norm.plot(kind='scatter', x='depth', y='img', ax=ax[0][0])
ax[0][0].set_title('[Train] depth img')
df_test_salt_norm.plot(kind='scatter', x='depth', y='img', ax=ax[0][1])
ax[0][1].set_title('[Test] depth img')
df_train_salt_norm.depth.hist(bins=60, ax=ax[1][0])
ax[1][0].set_title('[Train] histgram of depth')
df_test_salt_norm.depth.hist(bins=60, ax=ax[1][1])
ax[1][1].set_title('[Test] histgram of depth')
begin = 0
show_num = 10
for n, ix in enumerate(range(begin, show_num)):
plt.figure(figsize=(10, show_num*5))
print(ix, df_train_depth.iloc[ix].id, df_train_depth.iloc[ix].z)
plt.subplot(show_num, 2, n * 2 + 1)
plt.imshow(np.dstack((X_train[ix],X_train[ix],X_train[ix])))
plt.subplot(show_num, 2, n * 2 + 2)
tmp = np.squeeze(y_train[ix]).astype(np.float32)/255.0
plt.imshow(np.dstack((tmp,tmp,tmp)))
plt.show()
def rotate_img(x, theta, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0.):
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = image.transform_matrix_offset_center(rotation_matrix, h, w)
x = image.apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_rotate(img, mask, rotate_limit=(-20, 20), u=0.5):
if np.random.random() < u:
theta = np.pi / 180 * np.random.uniform(rotate_limit[0], rotate_limit[1])
        img = rotate_img(img, theta)   # rotate image and mask together so they stay aligned
        mask = rotate_img(mask, theta)
return img, mask
def flip_img(x, flip_number):
return cv2.flip(x, flip_number)
img_flips = np.zeros((len(X_train), im_height, im_width, im_chan), dtype=np.uint8)
mask_flips = np.zeros((len(y_train), im_height, im_width, 1), dtype=np.uint8)
for i in range(len(X_train)):
img_flip = flip_img(X_train[i], 0)
mask_flip = flip_img(y_train[i], 0)
img_flips[i] = img_flip.reshape(im_width, im_height, im_chan)
mask_flips[i] = mask_flip.reshape(im_width, im_height, 1)
X_train = np.vstack((X_train, img_flips))
y_train = np.vstack((y_train, mask_flips))
print('the length of combined X_train : ', len(X_train))
print('the length of combined y_train: ', len(y_train))
X_train_norm = X_train / 255.0
y_train_norm = y_train / 255.0
def mean_iou(y_true, y_pred):
prec = []
for t in np.arange(0.5, 1.0, 0.05):
y_pred_ = tf.to_int32(y_pred > t)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0)
def iou_loss(y_true, y_pred):
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true * y_pred)
score = (intersection + 1.) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) - intersection + 1.)
return 1 - score
def iou_bce_loss(y_true, y_pred):
return 0.5 * losses.binary_crossentropy(y_true, y_pred) + 0.5 * iou_loss(y_true, y_pred)
class UNet(object):
def __init__(self, input_size):
self.INPUT_SIZE = input_size
print(self.INPUT_SIZE)
inputs = Input((self.INPUT_SIZE, self.INPUT_SIZE, 1))
#s = Lambda(lambda x: x /255.0) (inputs)
print(inputs.shape)
encode_layer1 = self.__add_encode_layers(8, inputs, is_first=True)
encode_layer2 = self.__add_encode_layers(16, encode_layer1)
encode_layer3 = self.__add_encode_layers(32, encode_layer2)
encode_layer4 = self.__add_encode_layers(64, encode_layer3)
encode_layer5 = self.__add_encode_layers(128, encode_layer4)
decode_layer1 = self.__add_decode_layers(64, encode_layer5, encode_layer4, add_drop_layer=False)
decode_layer2 = self.__add_decode_layers(32, decode_layer1, encode_layer3, add_drop_layer=True)
decode_layer3 = self.__add_decode_layers(16, decode_layer2, encode_layer2, add_drop_layer=False)
decode_layer4 = self.__add_decode_layers(8, decode_layer3, encode_layer1, add_drop_layer=True)
outputs = Conv2D(1, 1, activation='sigmoid')(decode_layer4)
print(outputs.shape)
self.MODEL = Model(inputs=[inputs], outputs=[outputs])
def __add_encode_layers(self, filter_size, input_layer, is_first=False):
layer = input_layer
if is_first:
layer = Conv2D(filter_size, (3, 3), padding='same', input_shape=(self.INPUT_SIZE, self.INPUT_SIZE, 1))(layer)
else:
layer = MaxPooling2D(2)(layer)
layer = Conv2D(filter_size, 3, padding='same')(layer)
layer = Activation(activation='relu')(layer)
layer = Conv2D(filter_size, 3, padding='same')(layer)
layer = BatchNormalization()(layer)
layer = Activation(activation='relu')(layer)
layer = Conv2D(filter_size, 3, padding='same')(layer)
layer = BatchNormalization()(layer)
print(layer.shape)
return layer
def __add_decode_layers(self, filter_size, input_layer, concat_layer, add_drop_layer=False):
layer = UpSampling2D(2)(input_layer)
layer = concatenate([layer, concat_layer])
layer = Conv2D(filter_size, 3, padding='same')(layer)
layer = Activation(activation='relu')(layer)
layer = Conv2D(filter_size, 3, padding='same')(layer)
layer = BatchNormalization()(layer)
if add_drop_layer:
layer = Dropout(0.5)(layer)
print(layer.shape)
return layer
def model(self):
return self.MODEL
network = UNet(im_width)
optimizer = SGD(lr=0.01)
model = network.model()
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=['accuracy', mean_iou])
model.summary()
import datetime
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
save_file = 'model-tgs-salt-1-{}.h5'.format(now)
earlystopper = EarlyStopping(patience=10, verbose=1)
reduce_lr = ReduceLROnPlateau(patience=5, verbose=1)
checkpointer = ModelCheckpoint(save_file, verbose=1, save_best_only=True)
results = model.fit(X_train_norm, y_train_norm, batch_size=32, validation_split=0.2, epochs=50, callbacks=[earlystopper, checkpointer])
from google.colab import drive
drive.mount('/content/drive')
model.save("drive/My Drive/kaggle/{}".format(save_file))
history = results
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15,5))
ax_loss.plot(history.epoch, history.history["loss"], label="Train loss")
ax_loss.plot(history.epoch, history.history["val_loss"], label="Validation loss")
ax_acc.plot(history.epoch, history.history["mean_iou"], label="Train accuracy")
ax_acc.plot(history.epoch, history.history["val_mean_iou"], label="Validation accuracy")
history = results
fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15,5))
ax_loss.plot(history.epoch, history.history["loss"], label="Train loss")
ax_loss.plot(history.epoch, history.history["val_loss"], label="Validation loss")
ax_acc.plot(history.epoch, history.history["mean_iou"], label="Train accuracy")
ax_acc.plot(history.epoch, history.history["val_mean_iou"], label="Validation accuracy")
from google.colab import drive
drive.mount('/content/drive')
model = load_model("drive/My Drive/kaggle/model-tgs-salt-1-20180930120927.h5",
custom_objects={'mean_iou': mean_iou})
X_test_norm = X_test / 255.0
preds_train = model.predict(X_train_norm[:int(X_train_norm.shape[0]*0.8)], verbose=1)
preds_val = model.predict(X_train_norm[int(X_train_norm.shape[0]*0.8):], verbose=1)
preds_test = model.predict(X_test_norm, verbose=1)
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
preds_test_upsampled = []
for i in range(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]),
(sizes_test[i][0], sizes_test[i][1]),
mode='constant', preserve_range=True))
def RLenc(img, order='F', format=True):
"""
img is binary mask image, shape (r,c)
order is down-then-right, i.e. Fortran
format determines if the order needs to be preformatted (according to submission rules) or not
returns run length as an array or string (if format is True)
"""
bytes = img.reshape(img.shape[0] * img.shape[1], order=order)
runs = [] ## list of run lengths
r = 0 ## the current run length
pos = 1 ## count starts from 1 per WK
for c in bytes:
if (c == 0):
if r != 0:
runs.append((pos, r))
pos += r
r = 0
pos += 1
else:
r += 1
# if last run is unsaved (i.e. data ends with 1)
if r != 0:
runs.append((pos, r))
pos += r
r = 0
if format:
z = ''
for rr in runs:
z += '{} {} '.format(rr[0], rr[1])
return z[:-1]
else:
return runs
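# Illustrative sanity check of the run-length encoder on a tiny hand-made mask
# (an added example, not part of the original pipeline). In column-major ('F') order
# the mask below flattens to [0,0,0,1,1,1,0,0,1], so the expected encoding is "4 3 9 1".
_demo_mask = np.array([[0, 1, 0],
                       [0, 1, 0],
                       [0, 1, 1]])
print(RLenc(_demo_mask))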
pred_dict = {fn[:-4]:RLenc(np.round(preds_test_upsampled[i])) for i,fn in enumerate(test_ids)}
import datetime
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
submit_name = 'submission_{}.csv'.format(now)
sub = pd.DataFrame.from_dict(pred_dict,orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
sub.to_csv(submit_name)
from google.colab import drive
drive.mount('/content/drive')
!cp "../content/$submit_name" "drive/My Drive/kaggle/"
!kaggle competitions submit -c tgs-salt-identification-challenge -f "drive/My Drive/kaggle/$submit_name" -m "$now"
###Output
_____no_output_____ |
DoximityScraper.ipynb | ###Markdown
Testing w/ 2 radiologists
###Code
radiologists = ["Richard Duszak", "Andrew Rosenkrantz"]
prefix = 'https://www.doximity.com/pub/'
user_agent = {'User-agent' : 'Mozilla/5.0'}
input_name = radiologists[0].replace(' ', '-').lower()
degree = "-md"
url = prefix+input_name+degree
page = requests.get(url, headers=user_agent)
soup = BeautifulSoup(page.text, "html.parser")
#####################
full_name = soup.find("span", class_="user-full-name").text
first_name = soup.find("span", class_="user-name-first").text
last_name = soup.find("span", class_="user-name-last").text
specialty = soup.find("a", class_="profile-head-subtitle").text
city_state = soup.find("span", class_="profile-head-subtitle").text
subspecialty = soup.find("p", class_="user-subspecialty").text
job_title = soup.find("p", class_="user-job-title").text
st_address = soup.find("span", class_="black profile-contact-labels-wrap").text
PRE_phone_num = soup.find("div", class_="profile-contact-labels-wrap")
phone_num = PRE_phone_num.find("span", class_="black").text
ed_trn_elements = soup.find("ul", class_="profile-sectioned-list training").find_all("div", class_="profile-section-wrapper-text")
for element in ed_trn_elements:
institution = element.find("span", class_="black").text
education = element.find("span", class_="br").text
print(institution)
print(education)
print()
########################
# print(full_name)
# print(first_name)
# print(last_name)
# print(specialty)
# print(city_state)
# print(subspecialty)
# print(job_title)
# print(st_address)
# print(phone_num)
radiologists = ["Richard Duszak", "Andrew Rosenkrantz", "Nishant De Quadros"]
for rad in radiologists:
print(rad.replace(' ', '-').lower())
# Import data
scraper_list = pd.read_csv(r"C:\Users\ssantavicca3\Documents\Work Files & Folders\RadiologyTrainees_NotABot\scraper_list.csv")
scraper_list.head()
for index, rad in scraper_list[0:5].iterrows():
print(rad["first_nm"])
# (scraper_list["first_nm"][0]+"-"+scraper_list["last_nm"][0]).lower()
# scraper_list[0:2].iat[0,0]
for index, rad in scraper_list[0:5].iterrows():
npi = rad["NPI"]
input_name = (rad["first_nm"]+" "+rad["last_nm"]).replace(' ', '-').lower()
# input_name = rad.replace(' ', '-').lower()
print(npi)
print(input_name)
# Initialize request details and lists to dump data dicts
prefix = 'https://www.doximity.com/pub/'
user_agent = {'User-agent' : 'Mozilla/5.0'}
verif_data = []
ed_trn_data = []
# Loop through each radiologist and scrape data of interest
with elapsed_timer() as elapsed:
n_iter = 0
for index, rad in scraper_list.iterrows():
npi = rad["NPI"]
input_name = (rad["first_nm"]+" "+rad["last_nm"]).replace(' ', '-').lower() # replace() covers two part last names (e.g., De Soto)
# Beautiful Soup
        # Request the MD profile first; fall back to DO, or skip the iteration if no hits at all.
        # Note: requests.get() does not raise on a 404, so check the status code explicitly.
        degree = "md"
        url = prefix+input_name+"-"+degree
        page = requests.get(url, headers=user_agent)
        if page.status_code != 200:
            # Account for DOs
            degree = "do"
            url = prefix+input_name+"-"+degree
            page = requests.get(url, headers=user_agent)
            if page.status_code != 200:
                continue
soup = BeautifulSoup(page.text, "html.parser")
## Verification data
verif_details = {}
verif_details['NPI'] = npi #ID
verif_details['url'] = url
# Parse out exact text we want & add data to the dictionary
try:
verif_details['full_name'] = soup.find("span", class_="user-full-name").text
except:
pass
try:
verif_details['first_name'] = soup.find("span", class_="user-name-first").text
verif_details['last_name'] = soup.find("span", class_="user-name-last").text
verif_details['credentials'] = soup.find("span", class_="user-name-credentials").text
except:
pass
try:
verif_details['specialty'] = soup.find("a", class_="profile-head-subtitle").text
except:
pass
try:
city_state = soup.find("span", class_="profile-head-subtitle").text
verif_details['city_state'] = city_state[2:] #remove "/.n" from beginning of these strings
except:
pass
try:
verif_details['subspecialty'] = soup.find("p", class_="user-subspecialty").text
except:
pass
try:
verif_details['job_title'] = soup.find("p", class_="user-job-title").text
except:
pass
try:
verif_details['st_address'] = soup.find(itemprop="streetAddress").get("content")
verif_details['zip'] = soup.find(itemprop="postalCode").get("content")
verif_details['st_address_full'] = soup.find("span", class_="black profile-contact-labels-wrap").text
except:
pass
try:
verif_details['phone_num'] = soup.find("div", class_="profile-contact-labels-wrap").find("span", class_="black").text
except:
pass
# Append the dictionaries to the list
verif_details['idx_scraper_list'] = index
verif_data.append(verif_details)
## Education and training data
try:
ed_trn_elements = soup.find("ul", class_="profile-sectioned-list training").find_all("div", class_="profile-section-wrapper-text")
for element in ed_trn_elements:
ed_trn_details = {}
ed_trn_details['NPI'] = npi #ID
# Parse out exact text we want & add data to the dictionary
try:
ed_trn_details['institution'] = element.find("span", class_="black").text
except:
pass
try:
ed_trn_details['education'] = element.find("span", class_="br").text
except:
pass
# Append the dictionaries to the list
ed_trn_details['idx_scraper_list'] = index
ed_trn_data.append(ed_trn_details)
except:
pass
## Counter and timer for progress checks
n_iter += 1
if n_iter % 100 == 0:
print("Iteration (radiologists): "+str(n_iter)+" ----- Time Elapsed: "+str(timedelta(seconds=round(elapsed()))))
sys.stdout.flush()
# Save intermediate output incase of crash or timeout
filename1 = "datadump/doximity dump/saved_ed_trn_list.txt"
with open(filename1, 'w') as f:
for item in ed_trn_data:
f.write(f'{item}\n')
sys.stdout.flush()
filename2 = "datadump/doximity dump/saved_verif_list.txt"
with open(filename2, 'w') as f:
for item in verif_data:
f.write(f'{item}\n')
sys.stdout.flush()
# Iteration delay
sleep(randint(1,2))
## Create and format a DF from the list of dictionaries
ed_trn_df = pd.DataFrame.from_dict(ed_trn_data)
verif_df = pd.DataFrame.from_dict(verif_data)
len(ed_trn_df)
len(verif_df)
###Output
_____no_output_____
###Markdown
Just need to run loop on full list of radiologists. Before doing so, maybe run on first 25-50 rads to (1) get an estimate of ETA, and (2) determine whether there are any bugs that need to be accounted for before full run (e.g., maybe a try/except for MDs/DOs/no page at all). May also need to do a follow up scraper with google searches (e.g., "Michael Fishman MD doximity radiology") do get the profiles of people with either NaN or non-radiology specialties that likely shared name with multiple other physicians. Eg:https://www.doximity.com/pub/michael-fishman-md-ff5f620ahttps://www.doximity.com/pub/michael-fishman-md-acf558dehttps://www.doximity.com/pub/michael-fishman-md-8b21aefd EXPLORE THE MISSING DATA MORE FIRST i.e., it looks like there are plenty of urls that work but didn't pick up any information (at least in the verif_df file); it even looks like they're missing at consistent intervals??? For the 11,000 or so from the ed_trn_df file, this doesn't appear to be an issue?
###Code
# Code to read back in the .txt files created above (i.e., if I close the kernal before saving the local files created above)
from ast import literal_eval
def txt_to_dict(filename):
tmp_data = list()
with open(filename, 'r', encoding='cp1252') as f:
rows = f.readlines()
for row in rows:
row = literal_eval(row)
tmp_data.append(row)
return tmp_data
ed_trn_dict = txt_to_dict(r"C:\Users\ssantavicca3\Documents\Work Files & Folders\RadiologyTrainees_NotABot\datadump\doximity dump\saved_ed_trn_list.txt")
verif_dict = txt_to_dict(r"C:\Users\ssantavicca3\Documents\Work Files & Folders\RadiologyTrainees_NotABot\datadump\doximity dump\saved_verif_list.txt")
ed_trn_df = pd.DataFrame.from_dict(ed_trn_dict)
verif_df = pd.DataFrame.from_dict(verif_dict)
ed_trn_df.to_csv(r"C:\Users\ssantavicca3\Documents\Work Files & Folders\RadiologyTrainees_NotABot\ed_trn_df.csv", index=False)
verif_df.to_csv(r"C:\Users\ssantavicca3\Documents\Work Files & Folders\RadiologyTrainees_NotABot\verif_df.csv", index=False)
###Output
_____no_output_____ |
PyTorch/Learning/Part 2 - Neural Networks in PyTorch (Exercises).ipynb | ###Markdown
Neural networks with PyTorchDeep learning networks tend to be massive with dozens or hundreds of layers, that's where the term "deep" comes from. You can build one of these deep networks using only weight matrices as we did in the previous notebook, but in general it's very cumbersome and difficult to implement. PyTorch has a nice module `nn` that provides a nice way to efficiently build large neural networks.
###Code
# Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Now we're going to build a larger network that can solve a (formerly) difficult problem, identifying text in an image. Here we'll use the MNIST dataset which consists of greyscale handwritten digits. Each image is 28x28 pixels, you can see a sample belowOur goal is to build a neural network that can take one of these images and predict the digit in the image.First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.
###Code
### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
###Output
_____no_output_____
###Markdown
We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. Later, we'll use this to loop through the dataset for training, like```pythonfor image, label in trainloader: do things with images and labels```You'll notice I created the `trainloader` with a batch size of 64, and `shuffle=True`. The batch size is the number of images we get in one iteration from the data loader and pass through our network, often called a *batch*. And `shuffle=True` tells it to shuffle the dataset every time we start going through the data loader again. But here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size `(64, 1, 28, 28)`. So, 64 images per batch, 1 color channel, and 28x28 images.
###Code
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(type(images))
print(images.shape)
print(labels.shape)
###Output
<class 'torch.Tensor'>
torch.Size([64, 1, 28, 28])
torch.Size([64])
###Markdown
This is what one of the images looks like.
###Code
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
###Output
_____no_output_____
###Markdown
First, let's try to build a simple network for this dataset using weight matrices and matrix multiplications. Then, we'll see how to do it using PyTorch's `nn` module which provides a much more convenient and powerful method for defining network architectures.The networks you've seen so far are called *fully-connected* or *dense* networks. Each unit in one layer is connected to each unit in the next layer. In fully-connected networks, the input to each layer must be a one-dimensional vector (which can be stacked into a 2D tensor as a batch of multiple examples). However, our images are 28x28 2D tensors, so we need to convert them into 1D vectors. Thinking about sizes, we need to convert the batch of images with shape `(64, 1, 28, 28)` to have a shape of `(64, 784)` (784 is 28 times 28). This is typically called *flattening*: we flatten the 2D images into 1D vectors.Previously you built a network with one output unit. Here we need 10 output units, one for each digit. We want our network to predict the digit shown in an image, so what we'll do is calculate probabilities that the image is of any one digit or class. This ends up being a discrete probability distribution over the classes (digits) that tells us the most likely class for the image. That means we need 10 output units for the 10 classes (digits). We'll see how to convert the network output into a probability distribution next.> **Exercise:** Flatten the batch of images `images`. Then build a multi-layer network with 784 input units, 256 hidden units, and 10 output units using random tensors for the weights and biases. For now, use a sigmoid activation for the hidden layer. Leave the output layer without an activation; we'll add one that gives us a probability distribution next.
###Code
## Your solution
torch.manual_seed(7)
def activation(x):
return 1/(1+torch.exp(-x))
input = images.view(images.shape[0], -1)
w1 = torch.randn(784,256)
w2 = torch.randn(256,10)
b1 = torch.randn(256)
b2 = torch.randn(10)
h = activation(torch.mm(input,w1)+b1)
out = torch.mm(h, w2) + b2  # output layer scores, left without an activation (softmax comes next)
out.shape
#out = # output of your network, should have shape (64,10)
###Output
_____no_output_____
###Markdown
Now we have 10 outputs for our network. We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to. Something that looks like this:Here we see that the probability for each class is roughly the same. This represents an untrained network: it hasn't seen any data yet, so it just returns a uniform distribution with equal probabilities for each class.To calculate this probability distribution, we often use the [**softmax** function](https://en.wikipedia.org/wiki/Softmax_function). Mathematically this looks like$$\Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_{k=1}^K{e^{x_k}}}$$What this does is squish each input $x_i$ between 0 and 1 and normalizes the values to give you a proper probability distribution where the probabilities sum up to one.> **Exercise:** Implement a function `softmax` that performs the softmax calculation and returns probability distributions for each example in the batch. Note that you'll need to pay attention to the shapes when doing this. If you have a tensor `a` with shape `(64, 10)` and a tensor `b` with shape `(64,)`, doing `a/b` will give you an error because PyTorch will try to do the division across the columns (called broadcasting) but you'll get a size mismatch. The way to think about this is for each of the 64 examples, you only want to divide by one value, the sum in the denominator. So you need `b` to have a shape of `(64, 1)`. This way PyTorch will divide the 10 values in each row of `a` by the one value in each row of `b`. Pay attention to how you take the sum as well. You'll need to define the `dim` keyword in `torch.sum`. Setting `dim=0` takes the sum across the rows while `dim=1` takes the sum across the columns.
###Code
def softmax(x):
return (torch.exp(x)/torch.sum(torch.exp(x), dim = 1).view(-1,1))
## TODO: Implement the softmax function here
# Here, out should be the output of the network in the previous excercise with shape (64,10)
out.shape
probabilities = softmax(out)
# Does it have the right shape? Should be (64, 10)
print(probabilities.shape)
# Does it sum to 1?
print(probabilities.sum(dim=1))
###Output
torch.Size([64, 10])
tensor([1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000,
1.0000])
###Markdown
Building networks with PyTorchPyTorch provides a module `nn` that makes building networks much simpler. Here I'll show you how to build the same one as above with 784 inputs, 256 hidden units, 10 output units and a softmax output.
###Code
from torch import nn
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
# Define sigmoid activation and softmax output
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
return x
###Output
_____no_output_____
###Markdown
Let's go through this bit by bit.```pythonclass Network(nn.Module):```Here we're inheriting from `nn.Module`. Combined with `super().__init__()` this creates a class that tracks the architecture and provides a lot of useful methods and attributes. It is mandatory to inherit from `nn.Module` when you're creating a class for your network. The name of the class itself can be anything.```pythonself.hidden = nn.Linear(784, 256)```This line creates a module for a linear transformation, $x\mathbf{W} + b$, with 784 inputs and 256 outputs and assigns it to `self.hidden`. The module automatically creates the weight and bias tensors which we'll use in the `forward` method. You can access the weight and bias tensors once the network (`net`) is created with `net.hidden.weight` and `net.hidden.bias`.```pythonself.output = nn.Linear(256, 10)```Similarly, this creates another linear transformation with 256 inputs and 10 outputs.```pythonself.sigmoid = nn.Sigmoid()self.softmax = nn.Softmax(dim=1)```Here I defined operations for the sigmoid activation and softmax output. Setting `dim=1` in `nn.Softmax(dim=1)` calculates softmax across the columns.```pythondef forward(self, x):```PyTorch networks created with `nn.Module` must have a `forward` method defined. It takes in a tensor `x` and passes it through the operations you defined in the `__init__` method.```pythonx = self.hidden(x)x = self.sigmoid(x)x = self.output(x)x = self.softmax(x)```Here the input tensor `x` is passed through each operation and reassigned to `x`. We can see that the input tensor goes through the hidden layer, then a sigmoid function, then the output layer, and finally the softmax function. It doesn't matter what you name the variables here, as long as the inputs and outputs of the operations match the network architecture you want to build. The order in which you define things in the `__init__` method doesn't matter, but you'll need to sequence the operations correctly in the `forward` method.Now we can create a `Network` object.
###Code
# Create the network and look at its text representation
model = Network()
model
###Output
_____no_output_____
###Markdown
You can define the network somewhat more concisely and clearly using the `torch.nn.functional` module. This is the most common way you'll see networks defined as many operations are simple element-wise functions. We normally import this module as `F`, `import torch.nn.functional as F`.
###Code
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
def forward(self, x):
# Hidden layer with sigmoid activation
x = F.sigmoid(self.hidden(x))
# Output layer with softmax activation
x = F.softmax(self.output(x), dim=1)
return x
###Output
_____no_output_____
###Markdown
Activation functionsSo far we've only been looking at the sigmoid activation function, but in general any function can be used as an activation function. The only requirement is that for a network to approximate a non-linear function, the activation functions must be non-linear. Here are a few more examples of common activation functions: Tanh (hyperbolic tangent), and ReLU (rectified linear unit).In practice, the ReLU function is used almost exclusively as the activation function for hidden layers. Your Turn to Build a Network> **Exercise:** Create a network with 784 input units, a hidden layer with 128 units and a ReLU activation, then a hidden layer with 64 units and a ReLU activation, and finally an output layer with a softmax activation as shown above. You can use a ReLU activation with the `nn.ReLU` module or `F.relu` function.It's good practice to name your layers by their type of network, for instance 'fc' to represent a fully-connected layer. As you code your solution, use `fc1`, `fc2`, and `fc3` as your layer names.
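Before the exercise, here is a quick illustration of how these activations behave on the same toy inputs (a sketch; the values are arbitrary and not from the notebook's dataset):

```python
import torch

z = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])

print(torch.sigmoid(z))  # squashed into (0, 1)
print(torch.tanh(z))     # squashed into (-1, 1)
print(torch.relu(z))     # negatives clipped to 0, positives unchanged
```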
###Code
## Your solution here
class Network(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64,10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.softmax(self.fc3(x), dim = 1)
return x
model = Network()
model
###Output
_____no_output_____
###Markdown
Initializing weights and biasesThe weights and such are automatically initialized for you, but it's possible to customize how they are initialized. The weights and biases are tensors attached to the layer you defined, you can get them with `model.fc1.weight` for instance.
###Code
print(model.fc1.weight)
print(model.fc1.bias)
###Output
Parameter containing:
tensor([[ 0.0074, 0.0126, 0.0136, ..., -0.0305, 0.0031, -0.0169],
[-0.0056, -0.0313, 0.0250, ..., -0.0276, -0.0035, -0.0011],
[-0.0059, -0.0284, -0.0092, ..., 0.0146, 0.0092, -0.0347],
...,
[-0.0227, 0.0313, -0.0265, ..., -0.0280, 0.0268, -0.0074],
[-0.0203, 0.0048, 0.0218, ..., -0.0283, 0.0236, -0.0209],
[-0.0037, 0.0138, -0.0274, ..., 0.0268, -0.0338, -0.0260]],
requires_grad=True)
Parameter containing:
tensor([-0.0285, -0.0111, -0.0321, -0.0245, 0.0211, -0.0246, 0.0001, -0.0341,
-0.0258, -0.0197, -0.0157, 0.0031, -0.0341, -0.0336, -0.0139, 0.0004,
0.0233, 0.0224, -0.0200, 0.0323, 0.0096, -0.0054, -0.0003, -0.0297,
0.0311, 0.0010, 0.0088, -0.0158, 0.0205, 0.0158, 0.0315, 0.0085,
-0.0243, -0.0132, -0.0239, 0.0058, 0.0026, 0.0080, 0.0260, -0.0191,
-0.0356, 0.0034, -0.0335, 0.0233, 0.0279, -0.0070, -0.0074, 0.0322,
0.0262, -0.0090, -0.0314, -0.0205, -0.0155, -0.0294, 0.0357, -0.0350,
-0.0291, 0.0166, 0.0013, 0.0132, -0.0317, 0.0305, -0.0180, -0.0324,
0.0264, 0.0306, -0.0005, 0.0178, 0.0242, -0.0183, -0.0020, 0.0176,
-0.0194, -0.0265, -0.0244, 0.0306, 0.0338, -0.0276, 0.0281, -0.0272,
-0.0111, 0.0127, 0.0270, -0.0086, 0.0199, 0.0308, -0.0144, -0.0003,
0.0041, -0.0050, -0.0213, -0.0102, 0.0271, -0.0162, -0.0159, -0.0022,
-0.0261, 0.0228, 0.0085, -0.0109, 0.0198, 0.0353, 0.0080, 0.0238,
0.0114, -0.0234, -0.0075, 0.0019, 0.0192, 0.0333, -0.0166, 0.0226,
0.0340, 0.0151, -0.0354, 0.0313, -0.0102, -0.0273, -0.0297, -0.0136,
-0.0131, -0.0081, -0.0102, -0.0223, 0.0348, 0.0005, -0.0018, -0.0321],
requires_grad=True)
###Markdown
For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
###Code
# Set biases to all zeros
model.fc1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)
###Output
_____no_output_____
###Markdown
Forward passNow that we have a network, let's see what happens when we pass in an image.
###Code
# Grab some data
dataiter = iter(trainloader)
images, labels = dataiter.next()
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
###Output
_____no_output_____
###Markdown
As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random! Using `nn.Sequential`PyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.htmltorch.nn.Sequential)). Using this to build the equivalent network:
###Code
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], output_size),
nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
###Output
Sequential(
(0): Linear(in_features=784, out_features=128, bias=True)
(1): ReLU()
(2): Linear(in_features=128, out_features=64, bias=True)
(3): ReLU()
(4): Linear(in_features=64, out_features=10, bias=True)
(5): Softmax(dim=1)
)
###Markdown
Here our model is the same as before: 784 input units, a hidden layer with 128 units, ReLU activation, 64 unit hidden layer, another ReLU, then the output layer with 10 units, and the softmax output.The operations are available by passing in the appropriate index. For example, if you want to get the first Linear operation and look at the weights, you'd use `model[0]`.
###Code
print(model[0])
model[0].weight
###Output
Linear(in_features=784, out_features=128, bias=True)
###Markdown
You can also pass in an `OrderedDict` to name the individual layers and operations, instead of using incremental integers. Note that dictionary keys must be unique, so _each operation must have a different name_.
###Code
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
###Output
_____no_output_____
###Markdown
Now you can access layers either by integer index or by name
###Code
print(model[0])
print(model.fc1)
###Output
Linear(in_features=784, out_features=128, bias=True)
Linear(in_features=784, out_features=128, bias=True)
|
Project exploration.ipynb | ###Markdown
Visualising some of the input labels
###Code
sns.pairplot(data=data[data.columns[1:7]], hue='rotations')
plt.show()
# No apparent data relationship here, so we move on to the Keras framework
# distribution of the target age
ax = plt.figure(figsize=(30, 8))
sns.countplot(data.patient_age)
axis_font = {'fontname':'Arial', 'size':'24'}
plt.xlabel('age', **axis_font)
plt.ylabel('Count', **axis_font)
# distribution of the target labels
ax = plt.figure(figsize=(30, 8))
sns.countplot(data.labels)
axis_font = {'fontname':'Arial', 'size':'24'}
plt.xlabel('labels', **axis_font)
plt.ylabel('Count', **axis_font)
###Output
_____no_output_____
###Markdown
Pre-processing the images to fit the required model
###Code
data.head()
from keras_preprocessing.image import ImageDataGenerator
datagen=ImageDataGenerator(rescale=1./255)
# Keras input shape for inceptionV3 model
input_shape = (299, 299, 3)
train_generator=datagen.flow_from_dataframe(dataframe=data, directory=PATH_TO_SAMPLES, x_col="image_index", y_col="labels", has_ext=True, class_mode="categorical", target_size=(299,299), batch_size=32)
###Output
Found 5606 images belonging to 8 classes.
###Markdown
TODO: fix the validation set so that it is separate from the training data
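One possible way to address this TODO (a sketch, assuming the `data` DataFrame and `PATH_TO_SAMPLES` used above; `train_test_split` and the 80/20 split are illustrative choices, not part of the original notebook):

```python
from sklearn.model_selection import train_test_split
from keras_preprocessing.image import ImageDataGenerator

# Split the metadata first so the two generators never see the same images
train_df, valid_df = train_test_split(data, test_size=0.2, random_state=42, stratify=data.labels)

datagen = ImageDataGenerator(rescale=1./255)
train_generator = datagen.flow_from_dataframe(dataframe=train_df, directory=PATH_TO_SAMPLES,
                                              x_col="image_index", y_col="labels",
                                              class_mode="categorical",
                                              target_size=(299, 299), batch_size=32)
valid_generator = datagen.flow_from_dataframe(dataframe=valid_df, directory=PATH_TO_SAMPLES,
                                              x_col="image_index", y_col="labels",
                                              class_mode="categorical",
                                              target_size=(299, 299), batch_size=32)
```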
###Code
datagen=ImageDataGenerator(rescale=1./255)
# Keras input shape for inceptionV3 model
input_shape = (299, 299, 3)
valid_generator=datagen.flow_from_dataframe(dataframe=data, directory=PATH_TO_SAMPLES, x_col="image_index", y_col="labels", has_ext=True, class_mode="categorical", target_size=(299,299), batch_size=32)
from keras.preprocessing.image import array_to_img, img_to_array, load_img
img = load_img('sample/images/00000013_026.png') # this is a PIL image
x = img_to_array(img) # this is a Numpy array with shape (300, 300, 3)
np.shape(x)
###Output
_____no_output_____
###Markdown
Loading the model from our previous session
###Code
os.listdir("sample/")
from keras.models import load_model
model = load_model('sample/my_model.h5')
new_model = load_model('sample/my_model_V1.h5')
###Output
/anaconda3/lib/python3.6/site-packages/keras/engine/saving.py:292: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually.
warnings.warn('No training configuration found in save file: '
###Markdown
Tweaking the model to the required shape for multiclass classification. Compiling the model
###Code
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
nClasses= data.labels.unique()
print(len(nClasses))
###Output
8
###Markdown
Using Sequential to build a model from a pre-defined option
###Code
# Using Sequential to build an appropriate model
from keras.models import Sequential
from keras.layers import Dense
from keras import layers
base_model = model
nClasses= len(data.labels.unique())
new_model = Sequential()
new_model.add(base_model)
new_model.add(layers.Flatten())
new_model.add(layers.Dense(1024, activation='relu'))
new_model.add(layers.Dropout(0.5))
new_model.add(Dense(nClasses, activation='softmax'))
new_model.summary()
new_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Some of the actual code to train the model
###Code
from keras.callbacks import TensorBoard
from time import time
# Code I should use to train the model on the relevant data, or something similar.
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=valid_generator.n//valid_generator.batch_size
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
history = new_model.fit_generator(generator=train_generator,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
steps_per_epoch = 2)
history.history
###Output
_____no_output_____
###Markdown
Sampling the model's effectiveness
###Code
# Choose an index from the data
index = 3400
data[index:index+2]
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
IMAGES_PATH = 'sample/images/'
sample = data.image_index[index]
FULL_PATH = IMAGES_PATH + sample
print("[INFO] loading and preprocessing image...")
# dimensions of our images
img_width, img_height = 299, 299
# predicting images
img = image.load_img(FULL_PATH, target_size=(img_width, img_height))
x = image.img_to_array(img)
# important! otherwise the predictions will be '0'
x = x / 255
x = np.expand_dims(x, axis=0)
# get the bottleneck prediction from the new_model
bottleneck_prediction = new_model.predict(x)
#Fetching the relevant result from the prediction of the data
ID = np.argmax(bottleneck_prediction[0])
# Dictionary object of the generator labels
class_dictionary = train_generator.class_indices
inv_map = {v: k for k, v in class_dictionary.items()}
label = inv_map[ID]
print("Image ID: {}, label from data : {}".format(data.image_index[index], data.labels[index]))
print("Image ID: {}, prediciton: {} ,Predicted Label: {}".format(data.image_index[index], bottleneck_prediction[0],label))
###Output
[INFO] loading and preprocessing image...
Image ID: 00016778_039.png, label from data : AP-180
Image ID: 00016778_039.png, prediction: [0. 0. 1. 0. 0. 0. 0. 0.], Predicted Label: AP-90
###Markdown
Visualising statistics for the training phase
###Code
# Printing the relevant values for the accuracy over time
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
TODO: fix and find why there are missing values for accuracy, loss, etc.
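A useful first step for this TODO is to check exactly which keys Keras recorded, since metric names vary between versions ('acc' vs 'accuracy') and the `val_*` entries only appear when validation actually ran. A sketch, assuming the `history` object and `plt` from the cells above:

```python
# Inspect what was actually logged during training
print(history.history.keys())

# Plot whichever accuracy key exists, plus its validation counterpart if present
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key], label='train')
if 'val_' + acc_key in history.history:
    plt.plot(history.history['val_' + acc_key], label='validation')
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()
```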
###Code
history.history
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Saving this new working model
###Code
# For future changes
from keras.models import load_model
new_model.save('sample/my_model_V1.h5') # creates a HDF5 file 'my_model_V1.h5'
###Output
_____no_output_____
###Markdown
Loading Model and getting some prediction
###Code
import keras

# Build an InceptionV3 base (no classification top) with ImageNet weights
model = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_tensor=None,
                                                    input_shape=(299, 299, 3), pooling=None, classes=8)
# For future changes
from keras.models import load_model
model.save('sample/my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('sample/my_model.h5')
###Output
/anaconda3/lib/python3.6/site-packages/keras/engine/saving.py:292: UserWarning: No training configuration found in save file: the model was *not* compiled. Compile it manually.
warnings.warn('No training configuration found in save file: '
###Markdown
Some utility calls I might have used before
###Code
import ctypes
a = "hello world"
print(ctypes.cast(id(a), ctypes.py_object).value)
import gc
def objects_by_id(id_):
for obj in gc.get_objects():
if id(obj) == id_:
return obj
    raise Exception("Not found")
data.columns = [c.lower().replace(' ', '_') for c in data.columns]
data.head(20)
data = pd.read_csv(PATH_TO_META_SAMPLES)
data['patient_age'] = [int(str_age.strip('YMD')) for str_age in data['patient_age']]
n = len(data['image_index'])
labels = []
for index, row in data.iterrows():
temp = row['view_position'] + "-" + str(row['rotations'])
labels.append(temp)
data.to_csv('sample/updated_rotated_samples.csv', index=False)
###Output
_____no_output_____ |
docs/source/user_guide/automl.ipynb | ###Markdown
Automated Machine Learning (AutoML) Search Background Machine Learning[Machine learning](https://en.wikipedia.org/wiki/Machine_learning) (ML) is the process of constructing a mathematical model of a system based on a sample dataset collected from that system.One of the main goals of training an ML model is to teach the model to separate the signal present in the data from the noise inherent in system and in the data collection process. If this is done effectively, the model can then be used to make accurate predictions about the system when presented with new, similar data. Additionally, introspecting on an ML model can reveal key information about the system being modeled, such as which inputs and transformations of the inputs are most useful to the ML model for learning the signal in the data, and are therefore the most predictive.There are [a variety](https://en.wikipedia.org/wiki/Machine_learningApproaches) of ML problem types. Supervised learning describes the case where the collected data contains an output value to be modeled and a set of inputs with which to train the model. EvalML focuses on training supervised learning models.EvalML supports three common supervised ML problem types. The first is regression, where the target value to model is a continuous numeric value. Next are binary and multiclass classification, where the target value to model consists of two or more discrete values or categories. The choice of which supervised ML problem type is most appropriate depends on domain expertise and on how the model will be evaluated and used. EvalML is currently building support for supervised time series problems: time series regression, time series binary classification, and time series multiclass classification. While we've added some features to tackle these kinds of problems, our functionality is still being actively developed so please be mindful of that before using it. AutoML and Search[AutoML](https://en.wikipedia.org/wiki/Automated_machine_learning) is the process of automating the construction, training and evaluation of ML models. Given a data and some configuration, AutoML searches for the most effective and accurate ML model or models to fit the dataset. During the search, AutoML will explore different combinations of model type, model parameters and model architecture.An effective AutoML solution offers several advantages over constructing and tuning ML models by hand. AutoML can assist with many of the difficult aspects of ML, such as avoiding overfitting and underfitting, imbalanced data, detecting data leakage and other potential issues with the problem setup, and automatically applying best-practice data cleaning, feature engineering, feature selection and various modeling techniques. AutoML can also leverage search algorithms to optimally sweep the hyperparameter search space, resulting in model performance which would be difficult to achieve by manual training. AutoML in EvalMLEvalML supports all of the above and more.In its simplest usage, the AutoML search interface requires only the input data, the target data and a `problem_type` specifying what kind of supervised ML problem to model.** Graphing methods, like verbose AutoMLSearch, on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed.** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/jupyterlab-support-python-35) required. 
To download this, make sure you have [npm](https://nodejs.org/en/download/) installed.
###Code
import evalml
from evalml.utils import infer_feature_types
X, y = evalml.demos.load_fraud(n_rows=250)
###Output
_____no_output_____
###Markdown
To provide data to EvalML, it is recommended that you initialize a [Woodwork accessor](https://woodwork.alteryx.com/en/stable/) on your data. This allows you to easily control how EvalML will treat each of your features before training a model.EvalML also accepts ``pandas`` input, and will run type inference on top of the input ``pandas`` data. If you'd like to change the types inferred by EvalML, you can use the `infer_feature_types` utility method, which takes pandas or numpy input and converts it to a Woodwork data structure. The `feature_types` parameter can be used to specify what types specific columns should be.Feature types such as `Natural Language` must be specified in this way, otherwise Woodwork will infer it as `Unknown` type and drop it during the AutoMLSearch.In the example below, we reformat a couple features to make them easily consumable by the model, and then specify that the provider, which would have otherwise been inferred as a column with natural language, is a categorical column.
###Code
X.ww['expiration_date'] = X['expiration_date'].apply(lambda x: '20{}-01-{}'.format(x.split("/")[1], x.split("/")[0]))
X = infer_feature_types(X, feature_types= {'store_id': 'categorical',
'expiration_date': 'datetime',
'lat': 'categorical',
'lng': 'categorical',
'provider': 'categorical'})
###Output
_____no_output_____
###Markdown
In order to validate the results of the pipeline creation and optimization process, we will save some of our data as a holdout set.
###Code
X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(X, y, problem_type='binary', test_size=.2)
###Output
_____no_output_____
###Markdown
Data ChecksBefore calling `AutoMLSearch.search`, we should run some sanity checks on our data to ensure that the input data being passed will not run into some common issues before running a potentially time-consuming search. EvalML has various data checks that makes this easy. Each data check will return a collection of warnings and errors if it detects potential issues with the input data. This allows users to inspect their data to avoid confusing errors that may arise during the search process. You can learn about each of the data checks available through our [data checks guide](data_checks.ipynb) Here, we will run the `DefaultDataChecks` class, which contains a series of data checks that are generally useful.
###Code
from evalml.data_checks import DefaultDataChecks
data_checks = DefaultDataChecks("binary", "log loss binary")
data_checks.validate(X_train, y_train)
###Output
_____no_output_____
###Markdown
Since there were no warnings or errors returned, we can safely continue with the search process.
###Code
automl = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary', verbose=True)
automl.search()
###Output
_____no_output_____
###Markdown
With the `verbose` argument set to True, the AutoML search will log its progress, reporting each pipeline and parameter set evaluated during the search.There are a number of mechanisms to control the AutoML search time. One way is to set the `max_batches` parameter which controls the maximum number of rounds of AutoML to evaluate, where each round may train and score a variable number of pipelines. Another way is to set the `max_iterations` parameter which controls the maximum number of candidate models to be evaluated during AutoML. By default, AutoML will search for a single batch. The first pipeline to be evaluated will always be a baseline model representing a trivial solution. The AutoML interface supports a variety of other parameters. For a comprehensive list, please [refer to the API reference.](../autoapi/evalml/automl/index.rstevalml.automl.AutoMLSearch) We also provide [a standalone search method](../autoapi/evalml/automl/index.rstevalml.automl.search) which does all of the above in a single line, and returns the `AutoMLSearch` instance and data check results. If there were data check errors, AutoML will not be run and no `AutoMLSearch` instance will be returned. Detecting Problem TypeEvalML includes a simple method, `detect_problem_type`, to help determine the problem type given the target data. This function can return the predicted problem type as a ProblemType enum, choosing from ProblemType.BINARY, ProblemType.MULTICLASS, and ProblemType.REGRESSION. If the target data is invalid (for instance when there is only 1 unique label), the function will throw an error instead.
###Code
import pandas as pd
from evalml.problem_types import detect_problem_type
y_binary = pd.Series([0, 1, 1, 0, 1, 1])
detect_problem_type(y_binary)
###Output
_____no_output_____
###Markdown
Objective parameterAutoMLSearch takes in an `objective` parameter to determine which `objective` to optimize for. By default, this parameter is set to `auto`, which allows AutoML to choose `LogLossBinary` for binary classification problems, `LogLossMulticlass` for multiclass classification problems, and `R2` for regression problems.It should be noted that the `objective` parameter is only used in ranking and helping choose the pipelines to iterate over, but is not used to optimize each individual pipeline during fit-time.To get the default objective for each problem type, you can use the `get_default_primary_search_objective` function.
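For example, to rank pipelines by a specific metric instead of the default, the objective can be passed by name when constructing the search (a sketch; 'F1' is just one valid choice). The cell below then shows the helper that returns the default objective for each problem type.

```python
# Rank binary pipelines by F1 instead of the default log loss
automl_f1 = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train,
                                       problem_type='binary', objective='F1')
```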
###Code
from evalml.automl import get_default_primary_search_objective
binary_objective = get_default_primary_search_objective("binary")
multiclass_objective = get_default_primary_search_objective("multiclass")
regression_objective = get_default_primary_search_objective("regression")
print(binary_objective.name)
print(multiclass_objective.name)
print(regression_objective.name)
###Output
_____no_output_____
###Markdown
Using custom pipelinesEvalML's AutoML algorithm generates a set of pipelines to search with. To provide a custom set instead, set allowed_component_graphs to a dictionary of custom component graphs. `AutoMLSearch` will use these to generate `Pipeline` instances. Note: this will prevent AutoML from generating other pipelines to search over.
###Code
from evalml.pipelines import MulticlassClassificationPipeline
automl_custom = evalml.automl.AutoMLSearch(
X_train=X_train,
y_train=y_train,
problem_type='multiclass',
verbose=True,
allowed_component_graphs={"My_pipeline": ['Simple Imputer', 'Random Forest Classifier'],
"My_other_pipeline": ['One Hot Encoder', 'Random Forest Classifier']})
###Output
_____no_output_____
###Markdown
Stopping the search earlyTo stop the search early, hit `Ctrl-C`. This will bring up a prompt asking for confirmation. Responding with `y` will immediately stop the search. Responding with `n` will continue the search. Callback functions``AutoMLSearch`` supports several callback functions, which can be specified as parameters when initializing an ``AutoMLSearch`` object. They are:- ``start_iteration_callback``- ``add_result_callback``- ``error_callback`` Start Iteration CallbackUsers can set ``start_iteration_callback`` to set what function is called before each pipeline training iteration. This callback function must take three positional parameters: the pipeline class, the pipeline parameters, and the ``AutoMLSearch`` object.
###Code
## start_iteration_callback example function
def start_iteration_callback_example(pipeline_class, pipeline_params, automl_obj):
print ("Training pipeline with the following parameters:", pipeline_params)
###Output
_____no_output_____
###Markdown
Add Result CallbackUsers can set ``add_result_callback`` to set what function is called after each pipeline training iteration. This callback function must take three positional parameters: a dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the ``AutoMLSearch`` object.
###Code
## add_result_callback example function
def add_result_callback_example(pipeline_results_dict, untrained_pipeline, automl_obj):
print ("Results for trained pipeline with the following parameters:", pipeline_results_dict)
###Output
_____no_output_____
###Markdown
Error CallbackUsers can set the ``error_callback`` to set what function called when `search()` errors and raises an ``Exception``. This callback function takes three positional parameters: the ``Exception raised``, the traceback, and the ``AutoMLSearch object``. This callback function must also accept ``kwargs``, so ``AutoMLSearch`` is able to pass along other parameters used by default.Evalml defines several error callback functions, which can be found under `evalml.automl.callbacks`. They are:- `silent_error_callback`- `raise_error_callback`- `log_and_save_error_callback`- `raise_and_save_error_callback`- `log_error_callback` (default used when ``error_callback`` is None)
###Code
# error_callback example; this is implemented in the evalml library
def raise_error_callback(exception, traceback, automl, **kwargs):
"""Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
logger.error(f'AutoMLSearch raised a fatal exception: {str(exception)}')
logger.error("\n".join(traceback))
raise exception
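
# A sketch of how the callbacks above are wired in: they are passed as
# parameters when constructing AutoMLSearch (parameter names as described
# in the preceding sections; the example callbacks are the ones defined above).
automl_with_callbacks = evalml.automl.AutoMLSearch(
    X_train=X_train, y_train=y_train, problem_type='binary',
    start_iteration_callback=start_iteration_callback_example,
    add_result_callback=add_result_callback_example,
    error_callback=raise_error_callback)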
###Output
_____no_output_____
###Markdown
View RankingsA summary of all the pipelines built can be returned as a pandas DataFrame which is sorted by score. The score column contains the average score across all cross-validation folds while the validation_score column is computed from the first cross-validation fold.
###Code
automl.rankings
###Output
_____no_output_____
###Markdown
Describe PipelineEach pipeline is given an `id`. We can get more information about any particular pipeline using that `id`. Here, we will get more information about the pipeline with `id = 1`.
###Code
automl.describe_pipeline(1)
###Output
_____no_output_____
###Markdown
Get PipelineWe can get the object of any pipeline via their `id` as well:
###Code
pipeline = automl.get_pipeline(1)
print(pipeline.name)
print(pipeline.parameters)
###Output
_____no_output_____
###Markdown
Get best pipelineIf you specifically want to get the best pipeline, there is a convenient accessor for that.The pipeline returned is already fitted on the input X, y data that we passed to AutoMLSearch. To turn off this default behavior, set `train_best_pipeline=False` when initializing AutoMLSearch.
###Code
best_pipeline = automl.best_pipeline
print(best_pipeline.name)
print(best_pipeline.parameters)
best_pipeline.predict(X_train)
###Output
_____no_output_____
###Markdown
Training and Scoring Multiple Pipelines using AutoMLSearchAutoMLSearch will automatically fit the best pipeline on the entire training data. It also provides an easy API for training and scoring other pipelines.If you'd like to train one or more pipelines on the entire training data, you can use the `train_pipelines` method. Similarly, if you'd like to score one or more pipelines on a particular dataset, you can use the `score_pipelines` method.
###Code
trained_pipelines = automl.train_pipelines([automl.get_pipeline(i) for i in [0, 1, 2]])
trained_pipelines
pipeline_holdout_scores = automl.score_pipelines([trained_pipelines[name] for name in trained_pipelines.keys()],
X_holdout,
y_holdout,
['Accuracy Binary', 'F1', 'AUC'])
pipeline_holdout_scores
###Output
_____no_output_____
###Markdown
Saving AutoMLSearch and pipelines from AutoMLSearchThere are two ways to save results from AutoMLSearch. - You can save the AutoMLSearch object itself, calling `.save()` to do so. This will allow you to save the AutoMLSearch state and reload all pipelines from this.- If you want to save a pipeline from AutoMLSearch for future use, pipeline classes themselves have a `.save()` method.
###Code
# saving the entire automl search
automl.save("automl.cloudpickle")
automl2 = evalml.automl.AutoMLSearch.load("automl.cloudpickle")
# saving the best pipeline using .save()
best_pipeline.save("pipeline.cloudpickle")
best_pipeline_copy = evalml.pipelines.PipelineBase.load("pipeline.cloudpickle")
###Output
_____no_output_____
###Markdown
Limiting the AutoML Search SpaceThe AutoML search algorithm first trains each component in the pipeline with their default values. After the first iteration, it then tweaks the parameters of these components using the pre-defined hyperparameter ranges that these components have. To limit the search over certain hyperparameter ranges, you can specify a `custom_hyperparameters` argument with your `AutoMLSearch` parameters. These parameters will limit the hyperparameter search space. Hyperparameter ranges can be found through the [API reference](https://evalml.alteryx.com/en/stable/api_reference.html) for each component. Parameter arguments must be specified as dictionaries, but the associated values can be single values or `skopt.space` Real, Integer, Categorical values.If however you'd like to specify certain values for the initial batch of the AutoML search algorithm, you can use the `pipeline_parameters` argument. This will set the initial batch's component parameters to the values passed by this argument.
###Code
from evalml import AutoMLSearch
from evalml.demos import load_fraud
from skopt.space import Categorical
from evalml.model_family import ModelFamily
import woodwork as ww
X, y = load_fraud(n_rows=1000)
# example of setting parameter to just one value
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': 'mean'
}}
# limit the numeric impute strategy to include only `median` and `most_frequent`
# `mean` is the default value for this argument, but it doesn't need to be included in the specified hyperparameter range for this to work
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': Categorical(['median', 'most_frequent'])
}}
# set the initial batch numeric impute strategy strategy to 'median'
pipeline_parameters = {'Imputer': {
'numeric_impute_strategy': 'median'
}}
# using this custom hyperparameter means that our Imputer components in these pipelines will only search through
# 'median' and 'most_frequent' strategies for 'numeric_impute_strategy', and the initial batch parameter will be
# set to 'median'
automl_constrained = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
pipeline_parameters=pipeline_parameters,
custom_hyperparameters=custom_hyperparameters,
verbose=True)
###Output
_____no_output_____
###Markdown
Imbalanced DataThe AutoML search algorithm now has functionality to handle imbalanced data during classification! AutoMLSearch now provides two additional parameters, `sampler_method` and `sampler_balanced_ratio`, that allow you to let AutoMLSearch know whether to sample imbalanced data, and how to do so. `sampler_method` takes in either `Undersampler`, `Oversampler`, `auto`, or None as the sampler to use, and `sampler_balanced_ratio` specifies the `minority/majority` ratio that you want to sample to. Details on the Undersampler and Oversampler components can be found in the [documentation](https://evalml.alteryx.com/en/stable/api_reference.htmltransformers).This can be used for imbalanced datasets, like the fraud dataset, which has a 'minority:majority' ratio of < 0.2.
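For instance, to force a particular sampler rather than letting AutoMLSearch pick one automatically, you could pass it explicitly (a sketch; per the description above, `sampler_method` also accepts `None` to disable sampling). The next cell shows the default, automatic behavior.

```python
# Explicitly undersample the majority class instead of relying on the 'auto' choice
automl_undersample = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
                                  sampler_method='Undersampler',
                                  sampler_balanced_ratio=0.25)
```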
###Code
automl_auto = AutoMLSearch(X_train=X, y_train=y, problem_type='binary')
automl_auto.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
The Oversampler is chosen as the default sampling component here, since the `sampler_balanced_ratio = 0.25`. If you specified a lower ratio, for instance `sampler_balanced_ratio = 0.1`, then there would be no sampling component added here. This is because if a ratio of 0.1 would be considered balanced, then a ratio of 0.2 would also be balanced.The Oversampler uses SMOTE under the hood, and automatically selects whether to use SMOTE, SMOTEN, or SMOTENC based on the data it receives.
###Code
automl_auto_ratio = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', sampler_balanced_ratio=0.1)
automl_auto_ratio.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
Additionally, you can add more fine-grained sampling ratios by passing in a `sampling_ratio_dict` in pipeline parameters. For this dictionary, AutoMLSearch expects the keys to be int values from 0 to `n-1` for the classes, and the values would be the `sampler_balanced__ratio` associated with each target. This dictionary would override the AutoML argument `sampler_balanced_ratio`. Below, you can see the scenario for Oversampler component on this dataset. Note that the logic for Undersamplers is included in the commented section.
###Code
# In this case, the majority class is the negative class
# for the oversampler, we don't want to oversample this class, so class 0 (majority) will have a ratio of 1 to itself
# for the minority class 1, we want to oversample it to have a minority/majority ratio of 0.5, which means we want minority to have 1/2 the samples as the majority
sampler_ratio_dict = {0: 1, 1: 0.5}
pipeline_parameters = {"Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters)
automl_auto_ratio_dict.allowed_pipelines[-1]
# Undersampler case
# we don't want to undersample this class, so class 1 (minority) will have a ratio of 1 to itself
# for the majority class 0, we want to undersample it to have a minority/majority ratio of 0.5, which means we want majority to have 2x the samples as the minority
# sampler_ratio_dict = {0: 0.5, 1: 1}
# pipeline_parameters = {"Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
# automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters)
###Output
_____no_output_____
###Markdown
Adding ensemble methods to AutoML Stacking[Stacking](https://en.wikipedia.org/wiki/Ensemble_learningStacking) is an ensemble machine learning algorithm that involves training a model to best combine the predictions of several base learning algorithms. First, each base learning algorithm is trained using the given data. Then, the combining algorithm or meta-learner is trained on the predictions made by those base learning algorithms to make a final prediction.AutoML enables stacking using the `ensembling` flag during initialization; this is set to `False` by default. The stacking ensemble pipeline runs in its own batch after a whole cycle of training has occurred (each allowed pipeline trains for one batch). Note that this means __a large number of iterations may need to run before the stacking ensemble runs__. It is also important to note that __only the first CV fold is calculated for stacking ensembles__ because the model internally uses CV folds.
###Code
X, y = evalml.demos.load_breast_cancer()
automl_with_ensembling = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
max_batches=4,
ensembling=True,
verbose=True)
automl_with_ensembling.search()
###Output
_____no_output_____
###Markdown
We can view more information about the stacking ensemble pipeline (which was the best performing pipeline) by calling `.describe()`.
###Code
automl_with_ensembling.best_pipeline.describe()
###Output
_____no_output_____
###Markdown
Access raw resultsThe `AutoMLSearch` class records detailed results information under the `results` field, including information about the cross-validation scoring and parameters.
###Code
automl.results
###Output
_____no_output_____
###Markdown
Parallel AutoMLBy default, all pipelines in an AutoML batch are evaluated in series. Pipelines can be evaluated in parallel to improve performance during AutoML search. This is accomplished by a futures style submission and evaluation of pipelines in a batch. As of this writing, the pipelines use a threaded model for concurrent evaluation. This is similar to the currently implemented `n_jobs` parameter in the estimators, which uses increased numbers of threads to train and evaluate estimators. Quick StartTo quickly use some parallelism to enhance the pipeline searching, a string can be passed through to AutoMLSearch during initialization to setup the parallel engine and client within the AutoMLSearch object. The current options are "cf_threaded", "cf_process", "dask_threaded" and "dask_process" and indicate the futures backend to use and whether to use threaded- or process-level parallelism.
###Code
automl_cf_threaded = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine="cf_threaded")
automl_cf_threaded.search(show_iteration_plot = False)
automl_cf_threaded.close_engine()
###Output
_____no_output_____
###Markdown
Parallelism with Concurrent FuturesThe `EngineBase` class is robust and extensible enough to support futures-like implementations from a variety of libraries. The `CFEngine` extends the `EngineBase` to use the native Python [concurrent.futures library](https://docs.python.org/3/library/concurrent.futures.html). The `CFEngine` supports both thread- and process-level parallelism. The type of parallelism can be chosen using either the `ThreadPoolExecutor` or the `ProcessPoolExecutor`. If either executor is passed a `max_workers` parameter, it will set the number of processes and threads spawned. If not, the default number of processes will be equal to the number of processors available and the number of threads set to five times the number of processors available.Here, the CFEngine is invoked with default parameters, which is threaded parallelism using all available threads.
###Code
from concurrent.futures import ThreadPoolExecutor
from evalml.automl.engine.cf_engine import CFEngine, CFClient
cf_engine = CFEngine(CFClient(ThreadPoolExecutor(max_workers=4)))
automl_cf_threaded = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=cf_engine)
automl_cf_threaded.search(show_iteration_plot = False)
automl_cf_threaded.close_engine()
###Output
_____no_output_____
###Markdown
Note: the cell demonstrating process-level parallelism is a markdown due to incompatibility with our ReadTheDocs build. It can be run successfully locally.```pythonfrom concurrent.futures import ProcessPoolExecutor Repeat the process but using process-level parallelism\cf_engine = CFEngine(CFClient(ProcessPoolExecutor(max_workers=2)))automl_cf_process = AutoMLSearch(X_train=X, y_train=y, problem_type="binary", engine="cf_process")automl_cf_process.search(show_iteration_plot = False)automl_cf_process.close_engine()``` Parallelism with DaskThread or process level parallelism can be explicitly invoked for the `DaskEngine` (as well as the `CFEngine`). The `processes` can be set to `True` and the number of processes set using `n_workers`. If `processes` is set to `False`, then the resulting parallelism will be threaded and `n_workers` will represent the threads used. Examples of both follow.
###Code
from dask.distributed import LocalCluster
from evalml.automl.engine import DaskEngine
dask_engine_p2 = DaskEngine(cluster=LocalCluster(processes=True, n_workers = 2))
automl_dask_p2 = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=dask_engine_p2)
automl_dask_p2.search(show_iteration_plot = False)
# Explicitly shutdown the automl object's LocalCluster
automl_dask_p2.close_engine()
dask_engine_t4 = DaskEngine(cluster=LocalCluster(processes=False, n_workers = 4))
automl_dask_t4 = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=dask_engine_t4)
automl_dask_t4.search(show_iteration_plot = False)
automl_dask_t4.close_engine()
###Output
_____no_output_____
###Markdown
As we can see, a significant performance gain can result from simply using something other than the default `SequentialEngine`, ranging from a 100% speed up with multiple processes to 500% speedup with multiple threads!
###Code
print("Sequential search duration: %s" % str(automl.search_duration))
print("Concurrent futures (threaded) search duration: %s" % str(automl_cf_threaded.search_duration))
print("Dask (two processes) search duration: %s" % str(automl_dask_p2.search_duration))
print("Dask (four threads)search duration: %s" % str(automl_dask_t4.search_duration))
###Output
_____no_output_____
###Markdown
Automated Machine Learning (AutoML) Search Background Machine Learning[Machine learning](https://en.wikipedia.org/wiki/Machine_learning) (ML) is the process of constructing a mathematical model of a system based on a sample dataset collected from that system.One of the main goals of training an ML model is to teach the model to separate the signal present in the data from the noise inherent in system and in the data collection process. If this is done effectively, the model can then be used to make accurate predictions about the system when presented with new, similar data. Additionally, introspecting on an ML model can reveal key information about the system being modeled, such as which inputs and transformations of the inputs are most useful to the ML model for learning the signal in the data, and are therefore the most predictive.There are [a variety](https://en.wikipedia.org/wiki/Machine_learningApproaches) of ML problem types. Supervised learning describes the case where the collected data contains an output value to be modeled and a set of inputs with which to train the model. EvalML focuses on training supervised learning models.EvalML supports three common supervised ML problem types. The first is regression, where the target value to model is a continuous numeric value. Next are binary and multiclass classification, where the target value to model consists of two or more discrete values or categories. The choice of which supervised ML problem type is most appropriate depends on domain expertise and on how the model will be evaluated and used. EvalML is currently building support for supervised time series problems: time series regression, time series binary classification, and time series multiclass classification. While we've added some features to tackle these kinds of problems, our functionality is still being actively developed so please be mindful of that before using it. AutoML and Search[AutoML](https://en.wikipedia.org/wiki/Automated_machine_learning) is the process of automating the construction, training and evaluation of ML models. Given a data and some configuration, AutoML searches for the most effective and accurate ML model or models to fit the dataset. During the search, AutoML will explore different combinations of model type, model parameters and model architecture.An effective AutoML solution offers several advantages over constructing and tuning ML models by hand. AutoML can assist with many of the difficult aspects of ML, such as avoiding overfitting and underfitting, imbalanced data, detecting data leakage and other potential issues with the problem setup, and automatically applying best-practice data cleaning, feature engineering, feature selection and various modeling techniques. AutoML can also leverage search algorithms to optimally sweep the hyperparameter search space, resulting in model performance which would be difficult to achieve by manual training. AutoML in EvalMLEvalML supports all of the above and more.In its simplest usage, the AutoML search interface requires only the input data, the target data and a `problem_type` specifying what kind of supervised ML problem to model.** Graphing methods, like AutoMLSearch, on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed.** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/jupyterlab-support-python-35) required. 
To download this, make sure you have [npm](https://nodejs.org/en/download/) installed.
###Code
import evalml
from evalml.utils import infer_feature_types
X, y = evalml.demos.load_fraud(n_rows=250)
###Output
_____no_output_____
###Markdown
To provide data to EvalML, it is recommended that you initialize a [Woodwork accessor](https://woodwork.alteryx.com/en/stable/) on your data. This allows you to easily control how EvalML will treat each of your features before training a model.EvalML also accepts ``pandas`` input, and will run type inference on top of the input ``pandas`` data. If you'd like to change the types inferred by EvalML, you can use the `infer_feature_types` utility method, which takes pandas or numpy input and converts it to a Woodwork data structure. The `feature_types` parameter can be used to specify what types specific columns should be.Feature types such as `Natural Language` must be specified in this way, otherwise Woodwork will infer it as `Unknown` type and drop it during the AutoMLSearch.In the example below, we reformat a couple features to make them easily consumable by the model, and then specify that the provider, which would have otherwise been inferred as a column with natural language, is a categorical column.
###Code
X.ww['expiration_date'] = X['expiration_date'].apply(lambda x: '20{}-01-{}'.format(x.split("/")[1], x.split("/")[0]))
X = infer_feature_types(X, feature_types= {'store_id': 'categorical',
'expiration_date': 'datetime',
'lat': 'categorical',
'lng': 'categorical',
'provider': 'categorical'})
###Output
_____no_output_____
###Markdown
In order to validate the results of the pipeline creation and optimization process, we will save some of our data as a holdout set.
###Code
X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(X, y, problem_type='binary', test_size=.2)
###Output
_____no_output_____
###Markdown
Data ChecksBefore calling `AutoMLSearch.search`, we should run some sanity checks on our data to ensure that the input data being passed will not run into some common issues before running a potentially time-consuming search. EvalML has various data checks that makes this easy. Each data check will return a collection of warnings and errors if it detects potential issues with the input data. This allows users to inspect their data to avoid confusing errors that may arise during the search process. You can learn about each of the data checks available through our [data checks guide](data_checks.ipynb) Here, we will run the `DefaultDataChecks` class, which contains a series of data checks that are generally useful.
###Code
from evalml.data_checks import DefaultDataChecks
data_checks = DefaultDataChecks("binary", "log loss binary")
data_checks.validate(X_train, y_train)
###Output
_____no_output_____
###Markdown
Since there were no warnings or errors returned, we can safely continue with the search process.
###Code
automl = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary')
automl.search()
###Output
_____no_output_____
###Markdown
The AutoML search will log its progress, reporting each pipeline and parameter set evaluated during the search.There are a number of mechanisms to control the AutoML search time. One way is to set the `max_batches` parameter which controls the maximum number of rounds of AutoML to evaluate, where each round may train and score a variable number of pipelines. Another way is to set the `max_iterations` parameter which controls the maximum number of candidate models to be evaluated during AutoML. By default, AutoML will search for a single batch. The first pipeline to be evaluated will always be a baseline model representing a trivial solution. The AutoML interface supports a variety of other parameters. For a comprehensive list, please [refer to the API reference.](../autoapi/evalml/automl/index.rstevalml.automl.AutoMLSearch) We also provide [a standalone search method](../autoapi/evalml/automl/index.rstevalml.automl.search) which does all of the above in a single line, and returns the `AutoMLSearch` instance and data check results. If there were data check errors, AutoML will not be run and no `AutoMLSearch` instance will be returned. Detecting Problem TypeEvalML includes a simple method, `detect_problem_type`, to help determine the problem type given the target data. This function can return the predicted problem type as a ProblemType enum, choosing from ProblemType.BINARY, ProblemType.MULTICLASS, and ProblemType.REGRESSION. If the target data is invalid (for instance when there is only 1 unique label), the function will throw an error instead.
###Code
import pandas as pd
from evalml.problem_types import detect_problem_type
y_binary = pd.Series([0, 1, 1, 0, 1, 1])
detect_problem_type(y_binary)
###Output
_____no_output_____
###Markdown
Objective parameterAutoMLSearch takes in an `objective` parameter to determine which `objective` to optimize for. By default, this parameter is set to `auto`, which allows AutoML to choose `LogLossBinary` for binary classification problems, `LogLossMulticlass` for multiclass classification problems, and `R2` for regression problems.It should be noted that the `objective` parameter is only used in ranking and helping choose the pipelines to iterate over, but is not used to optimize each individual pipeline during fit-time.To get the default objective for each problem type, you can use the `get_default_primary_search_objective` function.
###Code
from evalml.automl import get_default_primary_search_objective
binary_objective = get_default_primary_search_objective("binary")
multiclass_objective = get_default_primary_search_objective("multiclass")
regression_objective = get_default_primary_search_objective("regression")
print(binary_objective.name)
print(multiclass_objective.name)
print(regression_objective.name)
###Output
_____no_output_____
###Markdown
Using custom pipelinesEvalML's AutoML algorithm generates a set of pipelines to search with. To provide a custom set instead, set allowed_component_graphs to a dictionary of custom component graphs. `AutoMLSearch` will use these to generate `Pipeline` instances. Note: this will prevent AutoML from generating other pipelines to search over.
###Code
from evalml.pipelines import MulticlassClassificationPipeline
automl_custom = evalml.automl.AutoMLSearch(X_train=X_train,
y_train=y_train,
problem_type='multiclass',
allowed_component_graphs={"My_pipeline": ['Simple Imputer', 'Random Forest Classifier'],
"My_other_pipeline": ['One Hot Encoder', 'Random Forest Classifier']})
###Output
_____no_output_____
###Markdown
Stopping the search earlyTo stop the search early, hit `Ctrl-C`. This will bring up a prompt asking for confirmation. Responding with `y` will immediately stop the search. Responding with `n` will continue the search. Callback functions``AutoMLSearch`` supports several callback functions, which can be specified as parameters when initializing an ``AutoMLSearch`` object. They are:- ``start_iteration_callback``- ``add_result_callback``- ``error_callback`` Start Iteration CallbackUsers can set ``start_iteration_callback`` to set what function is called before each pipeline training iteration. This callback function must take three positional parameters: the pipeline class, the pipeline parameters, and the ``AutoMLSearch`` object.
###Code
## start_iteration_callback example function
def start_iteration_callback_example(pipeline_class, pipeline_params, automl_obj):
print ("Training pipeline with the following parameters:", pipeline_params)
###Output
_____no_output_____
###Markdown
Add Result CallbackUsers can set ``add_result_callback`` to set what function is called after each pipeline training iteration. This callback function must take three positional parameters: a dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the ``AutoMLSearch`` object.
###Code
## add_result_callback example function
def add_result_callback_example(pipeline_results_dict, untrained_pipeline, automl_obj):
print ("Results for trained pipeline with the following parameters:", pipeline_results_dict)
###Output
_____no_output_____
###Markdown
Error CallbackUsers can set the ``error_callback`` to set what function called when `search()` errors and raises an ``Exception``. This callback function takes three positional parameters: the ``Exception raised``, the traceback, and the ``AutoMLSearch object``. This callback function must also accept ``kwargs``, so ``AutoMLSearch`` is able to pass along other parameters used by default.Evalml defines several error callback functions, which can be found under `evalml.automl.callbacks`. They are:- `silent_error_callback`- `raise_error_callback`- `log_and_save_error_callback`- `raise_and_save_error_callback`- `log_error_callback` (default used when ``error_callback`` is None)
###Code
# error_callback example; this is implemented in the evalml library
def raise_error_callback(exception, traceback, automl, **kwargs):
"""Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
logger.error(f'AutoMLSearch raised a fatal exception: {str(exception)}')
logger.error("\n".join(traceback))
raise exception
###Output
_____no_output_____
###Markdown
View RankingsA summary of all the pipelines built can be returned as a pandas DataFrame which is sorted by score. The score column contains the average score across all cross-validation folds while the validation_score column is computed from the first cross-validation fold.
###Code
automl.rankings
###Output
_____no_output_____
###Markdown
Describe PipelineEach pipeline is given an `id`. We can get more information about any particular pipeline using that `id`. Here, we will get more information about the pipeline with `id = 1`.
###Code
automl.describe_pipeline(1)
###Output
_____no_output_____
###Markdown
Get PipelineWe can get the object of any pipeline via their `id` as well:
###Code
pipeline = automl.get_pipeline(1)
print(pipeline.name)
print(pipeline.parameters)
###Output
_____no_output_____
###Markdown
Get best pipelineIf you specifically want to get the best pipeline, there is a convenient accessor for that.The pipeline returned is already fitted on the input X, y data that we passed to AutoMLSearch. To turn off this default behavior, set `train_best_pipeline=False` when initializing AutoMLSearch.
###Code
best_pipeline = automl.best_pipeline
print(best_pipeline.name)
print(best_pipeline.parameters)
best_pipeline.predict(X_train)
###Output
_____no_output_____
###Markdown
Training and Scoring Multiple Pipelines using AutoMLSearchAutoMLSearch will automatically fit the best pipeline on the entire training data. It also provides an easy API for training and scoring other pipelines.If you'd like to train one or more pipelines on the entire training data, you can use the `train_pipelines` method. Similarly, if you'd like to score one or more pipelines on a particular dataset, you can use the `score_pipelines` method.
###Code
trained_pipelines = automl.train_pipelines([automl.get_pipeline(i) for i in [0, 1, 2]])
trained_pipelines
pipeline_holdout_scores = automl.score_pipelines([trained_pipelines[name] for name in trained_pipelines.keys()],
X_holdout,
y_holdout,
['Accuracy Binary', 'F1', 'AUC'])
pipeline_holdout_scores
###Output
_____no_output_____
###Markdown
Saving AutoMLSearch and pipelines from AutoMLSearchThere are two ways to save results from AutoMLSearch. - You can save the AutoMLSearch object itself, calling `.save()` to do so. This will allow you to save the AutoMLSearch state and reload all pipelines from this.- If you want to save a pipeline from AutoMLSearch for future use, pipeline classes themselves have a `.save()` method.
###Code
# saving the entire automl search
automl.save("automl.cloudpickle")
automl2 = evalml.automl.AutoMLSearch.load("automl.cloudpickle")
# saving the best pipeline using .save()
best_pipeline.save("pipeline.cloudpickle")
best_pipeline_copy = evalml.pipelines.PipelineBase.load("pipeline.cloudpickle")
###Output
_____no_output_____
###Markdown
Limiting the AutoML Search SpaceThe AutoML search algorithm first trains each component in the pipeline with their default values. After the first iteration, it then tweaks the parameters of these components using the pre-defined hyperparameter ranges that these components have. To limit the search over certain hyperparameter ranges, you can specify a `custom_hyperparameters` argument with your `AutoMLSearch` parameters. These parameters will limit the hyperparameter search space. Hyperparameter ranges can be found through the [API reference](https://evalml.alteryx.com/en/stable/api_reference.html) for each component. Parameter arguments must be specified as dictionaries, but the associated values can be single values or `skopt.space` Real, Integer, Categorical values.If however you'd like to specify certain values for the initial batch of the AutoML search algorithm, you can use the `pipeline_parameters` argument. This will set the initial batch's component parameters to the values passed by this argument.
###Code
from evalml import AutoMLSearch
from evalml.demos import load_fraud
from skopt.space import Categorical
from evalml.model_family import ModelFamily
import woodwork as ww
X, y = load_fraud(n_rows=1000)
# example of setting parameter to just one value
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': 'mean'
}}
# limit the numeric impute strategy to include only `median` and `most_frequent`
# `mean` is the default value for this argument, but it doesn't need to be included in the specified hyperparameter range for this to work
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': Categorical(['median', 'most_frequent'])
}}
# set the initial batch numeric impute strategy strategy to 'median'
pipeline_parameters = {'Imputer': {
'numeric_impute_strategy': 'median'
}}
# using this custom hyperparameter means that our Imputer components in these pipelines will only search through
# 'median' and 'most_frequent' strategies for 'numeric_impute_strategy', and the initial batch parameter will be
# set to 'median'
automl_constrained = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters,
custom_hyperparameters=custom_hyperparameters)
###Output
_____no_output_____
###Markdown
Imbalanced DataThe AutoML search algorithm now has functionality to handle imbalanced data during classification! AutoMLSearch now provides two additional parameters, `sampler_method` and `sampler_balanced_ratio`, that allow you to let AutoMLSearch know whether to sample imbalanced data, and how to do so. `sampler_method` takes in either `Undersampler`, `Oversampler`, `auto`, or None as the sampler to use, and `sampler_balanced_ratio` specifies the `minority/majority` ratio that you want to sample to. Details on the Undersampler and Oversampler components can be found in the [documentation](https://evalml.alteryx.com/en/stable/api_reference.html#transformers).This can be used for imbalanced datasets, like the fraud dataset, which has a 'minority:majority' ratio of < 0.2.
###Code
automl_auto = AutoMLSearch(X_train=X, y_train=y, problem_type='binary')
automl_auto.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
The SMOTENC Oversampler is chosen as the default sampling component here, since the `sampler_balanced_ratio = 0.25`. If you specified a lower ratio, for instance `sampler_balanced_ratio = 0.1`, then there would be no sampling component added here. This is because if a ratio of 0.1 would be considered balanced, then a ratio of 0.2 would also be balanced.
###Code
automl_auto_ratio = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', sampler_balanced_ratio=0.1)
automl_auto_ratio.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
Additionally, you can add more fine-grained sampling ratios by passing in a `sampling_ratio_dict` in pipeline parameters. For this dictionary, AutoMLSearch expects the keys to be int values from 0 to `n-1` for the classes, and the values would be the `sampler_balanced_ratio` associated with each target. This dictionary would override the AutoML argument `sampler_balanced_ratio`. Below, you can see the scenario for the Oversampler component on this dataset. Note that the logic for Undersamplers is included in the commented section.
###Code
# In this case, the majority class is the negative class
# for the oversampler, we don't want to oversample this class, so class 0 (majority) will have a ratio of 1 to itself
# for the minority class 1, we want to oversample it to have a minority/majority ratio of 0.5, which means we want the minority class to have half as many samples as the majority
sampler_ratio_dict = {0: 1, 1: 0.5}
pipeline_parameters = {"SMOTENC Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters)
automl_auto_ratio_dict.allowed_pipelines[-1]
# Undersampler case
# we don't want to undersample this class, so class 1 (minority) will have a ratio of 1 to itself
# for the majority class 0, we want to undersample it to have a minority/majority ratio of 0.5, which means we want majority to have 2x the samples as the minority
# sampler_ratio_dict = {0: 0.5, 1: 1}
# pipeline_parameters = {"SMOTENC Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
# automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters)
###Output
_____no_output_____
###Markdown
Adding ensemble methods to AutoML Stacking[Stacking](https://en.wikipedia.org/wiki/Ensemble_learning#Stacking) is an ensemble machine learning algorithm that involves training a model to best combine the predictions of several base learning algorithms. First, each base learning algorithm is trained using the given data. Then, the combining algorithm or meta-learner is trained on the predictions made by those base learning algorithms to make a final prediction.AutoML enables stacking using the `ensembling` flag during initialization; this is set to `False` by default. The stacking ensemble pipeline runs in its own batch after a whole cycle of training has occurred (each allowed pipeline trains for one batch). Note that this means __a large number of iterations may need to run before the stacking ensemble runs__. It is also important to note that __only the first CV fold is calculated for stacking ensembles__ because the model internally uses CV folds.
###Code
X, y = evalml.demos.load_breast_cancer()
automl_with_ensembling = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
max_batches=4,
ensembling=True)
automl_with_ensembling.search()
###Output
_____no_output_____
###Markdown
We can view more information about the stacking ensemble pipeline (which was the best performing pipeline) by calling `.describe()`.
###Code
automl_with_ensembling.best_pipeline.describe()
###Output
_____no_output_____
###Markdown
Access raw resultsThe `AutoMLSearch` class records detailed results information under the `results` field, including information about the cross-validation scoring and parameters.
###Code
automl.results
###Output
_____no_output_____
###Markdown
Automated Machine Learning (AutoML) Search Background Machine Learning[Machine learning](https://en.wikipedia.org/wiki/Machine_learning) (ML) is the process of constructing a mathematical model of a system based on a sample dataset collected from that system.One of the main goals of training an ML model is to teach the model to separate the signal present in the data from the noise inherent in the system and in the data collection process. If this is done effectively, the model can then be used to make accurate predictions about the system when presented with new, similar data. Additionally, introspecting on an ML model can reveal key information about the system being modeled, such as which inputs and transformations of the inputs are most useful to the ML model for learning the signal in the data, and are therefore the most predictive.There are [a variety](https://en.wikipedia.org/wiki/Machine_learning#Approaches) of ML problem types. Supervised learning describes the case where the collected data contains an output value to be modeled and a set of inputs with which to train the model. EvalML focuses on training supervised learning models.EvalML supports three common supervised ML problem types. The first is regression, where the target value to model is a continuous numeric value. Next are binary and multiclass classification, where the target value to model consists of two or more discrete values or categories. The choice of which supervised ML problem type is most appropriate depends on domain expertise and on how the model will be evaluated and used. AutoML and Search[AutoML](https://en.wikipedia.org/wiki/Automated_machine_learning) is the process of automating the construction, training and evaluation of ML models. Given a dataset and some configuration, AutoML searches for the most effective and accurate ML model or models to fit the dataset. During the search, AutoML will explore different combinations of model type, model parameters and model architecture.An effective AutoML solution offers several advantages over constructing and tuning ML models by hand. AutoML can assist with many of the difficult aspects of ML, such as avoiding overfitting and underfitting, imbalanced data, detecting data leakage and other potential issues with the problem setup, and automatically applying best-practice data cleaning, feature engineering, feature selection and various modeling techniques. AutoML can also leverage search algorithms to optimally sweep the hyperparameter search space, resulting in model performance which would be difficult to achieve by manual training. AutoML in EvalMLEvalML supports all of the above and more.In its simplest usage, the AutoML search interface requires only the input data, the target data and a `problem_type` specifying what kind of supervised ML problem to model.** Graphing methods, like AutoMLSearch, on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed.** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/jupyterlab-support-python-35) is required. To download this, make sure you have [npm](https://nodejs.org/en/download/) installed. __Note:__ To provide data to EvalML, it is recommended that you create a `DataTable` object using [the Woodwork project](https://woodwork.alteryx.com/en/stable/). EvalML also accepts ``pandas`` input, and will run type inference on top of the input ``pandas`` data. 
If you’d like to change the types inferred by EvalML, you can use the `infer_feature_types` utility method as follows. The `infer_feature_types` utility method takes pandas or numpy input and converts it to a Woodwork data structure. It takes in a `feature_types` parameter which can be used to specify what types specific columns should be. In the example below, we specify that the provider, which would have otherwise been inferred as a column with natural language, is a categorical column.
###Code
import evalml
from evalml.utils import infer_feature_types
X, y = evalml.demos.load_fraud(n_rows=1000, return_pandas=True)
X = infer_feature_types(X, feature_types={'provider': 'categorical'})
automl = evalml.automl.AutoMLSearch(X_train=X, y_train=y, problem_type='binary')
automl.search()
###Output
_____no_output_____
###Markdown
The AutoML search will log its progress, reporting each pipeline and parameter set evaluated during the search.There are a number of mechanisms to control the AutoML search time. One way is to set the `max_batches` parameter which controls the maximum number of rounds of AutoML to evaluate, where each round may train and score a variable number of pipelines. Another way is to set the `max_iterations` parameter which controls the maximum number of candidate models to be evaluated during AutoML. By default, AutoML will search for a single batch. The first pipeline to be evaluated will always be a baseline model representing a trivial solution. The AutoML interface supports a variety of other parameters. For a comprehensive list, please [refer to the API reference.](../generated/evalml.automl.AutoMLSearch.ipynb) Detecting Problem TypeEvalML includes a simple method, `detect_problem_type`, to help determine the problem type given the target data. This function can return the predicted problem type as a ProblemType enum, choosing from ProblemType.BINARY, ProblemType.MULTICLASS, and ProblemType.REGRESSION. If the target data is invalid (for instance when there is only 1 unique label), the function will throw an error instead.
###Code
import pandas as pd
from evalml.problem_types import detect_problem_type
y = pd.Series([0, 1, 1, 0, 1, 1])
detect_problem_type(y)
###Output
_____no_output_____
###Markdown
Objective parameterAutoMLSearch takes in an `objective` parameter to determine which `objective` to optimize for. By default, this parameter is set to `auto`, which allows AutoML to choose `LogLossBinary` for binary classification problems, `LogLossMulticlass` for multiclass classification problems, and `R2` for regression problems.It should be noted that the `objective` parameter is only used in ranking and helping choose the pipelines to iterate over, but is not used to optimize each individual pipeline during fit-time.To get the default objective for each problem type, you can use the `get_default_primary_search_objective` function.
###Code
from evalml.automl import get_default_primary_search_objective
binary_objective = get_default_primary_search_objective("binary")
multiclass_objective = get_default_primary_search_objective("multiclass")
regression_objective = get_default_primary_search_objective("regression")
print(binary_objective.name)
print(multiclass_objective.name)
print(regression_objective.name)
###Output
_____no_output_____
###Markdown
Data Checks`AutoMLSearch.search` runs a set of data checks before beginning the search process to ensure that the input data being passed will not run into some common issues before running a potentially time-consuming search. If the data checks find any potential errors, an exception will be thrown before the search begins, allowing users to inspect their data to avoid confusing errors that may arise later during the search process.This behavior is controlled by the `data_checks` parameter which can take in either a `DataChecks` object, a list of `DataCheck` objects, `None`, or valid string inputs (`"disabled"`, `"auto"`). By default, this parameter is set to `auto`, which runs the default collection of data checks defined in the `DefaultDataChecks` class. If set to `"disabled"` or None, no data checks will run. Using custom pipelinesEvalML's AutoML algorithm generates a set of pipelines to search with. To provide a custom set instead, set allowed_pipelines to a list of [custom pipeline](pipelines.ipynb) classes. Note: this will prevent AutoML from generating other pipelines to search over.
###Code
from evalml.pipelines import MulticlassClassificationPipeline
class CustomMulticlassClassificationPipeline(MulticlassClassificationPipeline):
component_graph = ['Simple Imputer', 'Random Forest Classifier']
automl_custom = evalml.automl.AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass', allowed_pipelines=[CustomMulticlassClassificationPipeline])
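# Sketch (assuming this evalml version accepts a data_checks argument on search(), as described above):
# the data checks can be disabled when running the search, e.g.
# automl.search(data_checks="disabled")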
###Output
_____no_output_____
###Markdown
Stopping the search earlyTo stop the search early, hit `Ctrl-C`. This will bring up a prompt asking for confirmation. Responding with `y` will immediately stop the search. Responding with `n` will continue the search. Callback functions``AutoMLSearch`` supports several callback functions, which can be specified as parameters when initializing an ``AutoMLSearch`` object. They are:- ``start_iteration_callback``- ``add_result_callback``- ``error_callback`` Start Iteration CallbackUsers can set ``start_iteration_callback`` to set what function is called before each pipeline training iteration. This callback function must take three positional parameters: the pipeline class, the pipeline parameters, and the ``AutoMLSearch`` object.
###Code
## start_iteration_callback example function
def start_iteration_callback_example(pipeline_class, pipeline_params, automl_obj):
print ("Training pipeline with the following parameters:", pipeline_params)
###Output
_____no_output_____
###Markdown
Add Result CallbackUsers can set ``add_result_callback`` to set what function is called after each pipeline training iteration. This callback function must take three positional parameters: a dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the ``AutoMLSearch`` object.
###Code
## add_result_callback example function
def add_result_callback_example(pipeline_results_dict, untrained_pipeline, automl_obj):
print ("Results for trained pipeline with the following parameters:", pipeline_results_dict)
###Output
_____no_output_____
###Markdown
Error CallbackUsers can set the ``error_callback`` to set what function is called when `search()` errors and raises an ``Exception``. This callback function takes three positional parameters: the ``Exception`` raised, the traceback, and the ``AutoMLSearch`` object. This callback function must also accept ``kwargs``, so ``AutoMLSearch`` is able to pass along other parameters used by default.Evalml defines several error callback functions, which can be found under `evalml.automl.callbacks`. They are:- `silent_error_callback`- `raise_error_callback`- `log_and_save_error_callback`- `raise_and_save_error_callback`- `log_error_callback` (default used when ``error_callback`` is None)
###Code
# error_callback example; this is implemented in the evalml library
def raise_error_callback(exception, traceback, automl, **kwargs):
"""Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
logger.error(f'AutoMLSearch raised a fatal exception: {str(exception)}')
logger.error("\n".join(traceback))
raise exception
###Output
_____no_output_____
###Markdown
View RankingsA summary of all the pipelines built can be returned as a pandas DataFrame which is sorted by score. The score column contains the average score across all cross-validation folds while the validation_score column is computed from the first cross-validation fold.
###Code
automl.rankings
###Output
_____no_output_____
###Markdown
Describe PipelineEach pipeline is given an `id`. We can get more information about any particular pipeline using that `id`. Here, we will get more information about the pipeline with `id = 1`.
###Code
automl.describe_pipeline(1)
###Output
_____no_output_____
###Markdown
Get PipelineWe can get the object of any pipeline via their `id` as well:
###Code
pipeline = automl.get_pipeline(1)
print(pipeline.name)
print(pipeline.parameters)
###Output
_____no_output_____
###Markdown
Get best pipelineIf you specifically want to get the best pipeline, there is a convenient accessor for that.The pipeline returned is already fitted on the input X, y data that we passed to AutoMLSearch. To turn off this default behavior, set `train_best_pipeline=False` when initializing AutoMLSearch.
###Code
best_pipeline = automl.best_pipeline
print(best_pipeline.name)
print(best_pipeline.parameters)
best_pipeline.predict(X)
###Output
_____no_output_____
###Markdown
Saving AutoMLSearch and pipelines from AutoMLSearchThere are two ways to save results from AutoMLSearch. - You can save the AutoMLSearch object itself, calling `.save()` to do so. This will allow you to save the AutoMLSearch state and reload all pipelines from this.- If you want to save a pipeline from AutoMLSearch for future use, you can pickle the resulting pipeline. While pipeline classes themselves have a `.save()` method, you can also pickle the resulting pipeline in order to save it.** Stacked Ensembling pipelines cannot currently be pickled
###Code
# saving the best pipeline using .save()
# best_pipeline.save("file_path_here")
# saving the best pipeline using pickle
import pickle
pickled_pipeline = pickle.dumps(best_pipeline)
best_unpickled_pipeline = pickle.loads(pickled_pipeline)
best_unpickled_pipeline
###Output
_____no_output_____
###Markdown
Limiting the AutoML Search SpaceThe AutoML search algorithm first trains each component in the pipeline with their default values. After the first iteration, it then tweaks the parameters of these components using the pre-defined hyperparameter ranges that these components have. To limit the search over certain hyperparameter ranges, you can specify a `pipeline_parameters` argument with your pipeline parameters. These parameters will also limit the hyperparameter search space. Hyperparameter ranges can be found through the [API reference](https://evalml.alteryx.com/en/stable/api_reference.html) for each component. Parameter arguments must be specified as dictionaries, but the associated values can be single values, list/tuples, or `skopt.space` Real, Integer, Categorical values.
###Code
from evalml import AutoMLSearch
from evalml.demos import load_fraud
from skopt.space import Categorical
from evalml.model_family import ModelFamily
import woodwork as ww
X, y = load_fraud(n_rows=1000)
# example of setting parameter to just one value
pipeline_hyperparameters = {'Imputer': {
'numeric_impute_strategy': 'mean'
}}
# limit the numeric impute strategy to include only `median` and `most_frequent`
# `mean` is the default value for this argument, but it doesn't need to be included in the specified hyperparameter range for this to work
pipeline_hyperparameters = {'Imputer': {
'numeric_impute_strategy': ['median', 'most_frequent']
}}
# example using skopt.space.Categorical
pipeline_hyperparameters = {'Imputer': {
'numeric_impute_strategy': Categorical(['median', 'most_frequent'])
}}
# using this pipeline parameter means that our Imputer components in the pipelines will only search through 'median' and 'most_frequent' strategies for 'numeric_impute_strategy'
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_hyperparameters)
automl.search()
automl.best_pipeline.hyperparameters
###Output
_____no_output_____
###Markdown
Access raw resultsThe `AutoMLSearch` class records detailed results information under the `results` field, including information about the cross-validation scoring and parameters.
###Code
automl.results
###Output
_____no_output_____
###Markdown
Adding ensemble methods to AutoML Stacking[Stacking](https://en.wikipedia.org/wiki/Ensemble_learning#Stacking) is an ensemble machine learning algorithm that involves training a model to best combine the predictions of several base learning algorithms. First, each base learning algorithm is trained using the given data. Then, the combining algorithm or meta-learner is trained on the predictions made by those base learning algorithms to make a final prediction.AutoML enables stacking using the `ensembling` flag during initialization; this is set to `False` by default. The stacking ensemble pipeline runs in its own batch after a whole cycle of training has occurred (each allowed pipeline trains for one batch). Note that this means __a large number of iterations may need to run before the stacking ensemble runs__. It is also important to note that __only the first CV fold is calculated for stacking ensembles__ because the model internally uses CV folds.
###Code
X, y = evalml.demos.load_breast_cancer()
automl_with_ensembling = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.RANDOM_FOREST, ModelFamily.LINEAR_MODEL],
max_batches=5,
ensembling=True)
automl_with_ensembling.search()
###Output
_____no_output_____
###Markdown
We can view more information about the stacking ensemble pipeline (which was the best performing pipeline) by calling `.describe()`.
###Code
automl_with_ensembling.best_pipeline.describe()
###Output
_____no_output_____
###Markdown
Automated Machine Learning (AutoML) Search Background Machine Learning[Machine learning](https://en.wikipedia.org/wiki/Machine_learning) (ML) is the process of constructing a mathematical model of a system based on a sample dataset collected from that system.One of the main goals of training an ML model is to teach the model to separate the signal present in the data from the noise inherent in the system and in the data collection process. If this is done effectively, the model can then be used to make accurate predictions about the system when presented with new, similar data. Additionally, introspecting on an ML model can reveal key information about the system being modeled, such as which inputs and transformations of the inputs are most useful to the ML model for learning the signal in the data, and are therefore the most predictive.There are [a variety](https://en.wikipedia.org/wiki/Machine_learning#Approaches) of ML problem types. Supervised learning describes the case where the collected data contains an output value to be modeled and a set of inputs with which to train the model. EvalML focuses on training supervised learning models.EvalML supports three common supervised ML problem types. The first is regression, where the target value to model is a continuous numeric value. Next are binary and multiclass classification, where the target value to model consists of two or more discrete values or categories. The choice of which supervised ML problem type is most appropriate depends on domain expertise and on how the model will be evaluated and used. EvalML is currently building support for supervised time series problems: time series regression, time series binary classification, and time series multiclass classification. While we've added some features to tackle these kinds of problems, our functionality is still being actively developed so please be mindful of that before using it. AutoML and Search[AutoML](https://en.wikipedia.org/wiki/Automated_machine_learning) is the process of automating the construction, training and evaluation of ML models. Given a dataset and some configuration, AutoML searches for the most effective and accurate ML model or models to fit the dataset. During the search, AutoML will explore different combinations of model type, model parameters and model architecture.An effective AutoML solution offers several advantages over constructing and tuning ML models by hand. AutoML can assist with many of the difficult aspects of ML, such as avoiding overfitting and underfitting, imbalanced data, detecting data leakage and other potential issues with the problem setup, and automatically applying best-practice data cleaning, feature engineering, feature selection and various modeling techniques. AutoML can also leverage search algorithms to optimally sweep the hyperparameter search space, resulting in model performance which would be difficult to achieve by manual training. AutoML in EvalMLEvalML supports all of the above and more.In its simplest usage, the AutoML search interface requires only the input data, the target data and a `problem_type` specifying what kind of supervised ML problem to model.** Graphing methods, like verbose AutoMLSearch, on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed.** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/jupyterlab-support-python-35) is required. 
To download this, make sure you have [npm](https://nodejs.org/en/download/) installed.
###Code
import evalml
from evalml.utils import infer_feature_types
X, y = evalml.demos.load_fraud(n_rows=250)
###Output
_____no_output_____
###Markdown
To provide data to EvalML, it is recommended that you initialize a [Woodwork accessor](https://woodwork.alteryx.com/en/stable/) on your data. This allows you to easily control how EvalML will treat each of your features before training a model.EvalML also accepts ``pandas`` input, and will run type inference on top of the input ``pandas`` data. If you'd like to change the types inferred by EvalML, you can use the `infer_feature_types` utility method, which takes pandas or numpy input and converts it to a Woodwork data structure. The `feature_types` parameter can be used to specify what types specific columns should be.Feature types such as `Natural Language` must be specified in this way, otherwise Woodwork will infer it as `Unknown` type and drop it during the AutoMLSearch.In the example below, we reformat a couple features to make them easily consumable by the model, and then specify that the provider, which would have otherwise been inferred as a column with natural language, is a categorical column.
###Code
X.ww['expiration_date'] = X['expiration_date'].apply(lambda x: '20{}-01-{}'.format(x.split("/")[1], x.split("/")[0]))
X = infer_feature_types(X, feature_types= {'store_id': 'categorical',
'expiration_date': 'datetime',
'lat': 'categorical',
'lng': 'categorical',
'provider': 'categorical'})
###Output
_____no_output_____
###Markdown
In order to validate the results of the pipeline creation and optimization process, we will save some of our data as a holdout set.
###Code
X_train, X_holdout, y_train, y_holdout = evalml.preprocessing.split_data(X, y, problem_type='binary', test_size=.2)
###Output
_____no_output_____
###Markdown
Data ChecksBefore calling `AutoMLSearch.search`, we should run some sanity checks on our data to ensure that the input data being passed will not run into some common issues before running a potentially time-consuming search. EvalML has various data checks that make this easy. Each data check will return a collection of warnings and errors if it detects potential issues with the input data. This allows users to inspect their data to avoid confusing errors that may arise during the search process. You can learn about each of the data checks available through our [data checks guide](data_checks.ipynb). Here, we will run the `DefaultDataChecks` class, which contains a series of data checks that are generally useful.
###Code
from evalml.data_checks import DefaultDataChecks
data_checks = DefaultDataChecks("binary", "log loss binary")
data_checks.validate(X_train, y_train)
###Output
_____no_output_____
###Markdown
Since there were no warnings or errors returned, we can safely continue with the search process.
###Code
automl = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary', verbose=True)
automl.search()
###Output
_____no_output_____
###Markdown
With the `verbose` argument set to True, the AutoML search will log its progress, reporting each pipeline and parameter set evaluated during the search.There are a number of mechanisms to control the AutoML search time. One way is to set the `max_batches` parameter which controls the maximum number of rounds of AutoML to evaluate, where each round may train and score a variable number of pipelines. Another way is to set the `max_iterations` parameter which controls the maximum number of candidate models to be evaluated during AutoML. By default, AutoML will search for a single batch. The first pipeline to be evaluated will always be a baseline model representing a trivial solution. The AutoML interface supports a variety of other parameters. For a comprehensive list, please [refer to the API reference.](../autoapi/evalml/automl/index.rstevalml.automl.AutoMLSearch) We also provide [a standalone search method](../autoapi/evalml/automl/index.rstevalml.automl.search) which does all of the above in a single line, and returns the `AutoMLSearch` instance and data check results. If there were data check errors, AutoML will not be run and no `AutoMLSearch` instance will be returned. Detecting Problem TypeEvalML includes a simple method, `detect_problem_type`, to help determine the problem type given the target data. This function can return the predicted problem type as a ProblemType enum, choosing from ProblemType.BINARY, ProblemType.MULTICLASS, and ProblemType.REGRESSION. If the target data is invalid (for instance when there is only 1 unique label), the function will throw an error instead.
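Before moving on, here is a minimal sketch of the search-budget controls described above (the object names are hypothetical, and it assumes the `X_train`/`y_train` split created earlier):

```python
# cap the search at two batches of pipelines
automl_quick = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train,
                                          problem_type='binary', max_batches=2)
# or cap the total number of pipelines evaluated
automl_capped = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train,
                                           problem_type='binary', max_iterations=5)
# the standalone helper returns the AutoMLSearch instance together with the data check results (sketch):
# automl_standalone, data_check_results = evalml.automl.search(X_train=X_train, y_train=y_train, problem_type='binary')
```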
###Code
import pandas as pd
from evalml.problem_types import detect_problem_type
y_binary = pd.Series([0, 1, 1, 0, 1, 1])
detect_problem_type(y_binary)
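# Sketch: a target with more than two discrete labels should be detected as multiclass, e.g.
# detect_problem_type(pd.Series([0, 1, 2, 2, 1, 0]))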
###Output
_____no_output_____
###Markdown
Objective parameterAutoMLSearch takes in an `objective` parameter to determine which `objective` to optimize for. By default, this parameter is set to `auto`, which allows AutoML to choose `LogLossBinary` for binary classification problems, `LogLossMulticlass` for multiclass classification problems, and `R2` for regression problems.It should be noted that the `objective` parameter is only used in ranking and helping choose the pipelines to iterate over, but is not used to optimize each individual pipeline during fit-time.To get the default objective for each problem type, you can use the `get_default_primary_search_objective` function.
###Code
from evalml.automl import get_default_primary_search_objective
binary_objective = get_default_primary_search_objective("binary")
multiclass_objective = get_default_primary_search_objective("multiclass")
regression_objective = get_default_primary_search_objective("regression")
print(binary_objective.name)
print(multiclass_objective.name)
print(regression_objective.name)
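# Sketch (hypothetical object name): the objective can also be set explicitly instead of 'auto', e.g.
# automl_f1 = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary', objective='F1')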
###Output
_____no_output_____
###Markdown
Using custom pipelinesEvalML's AutoML algorithm generates a set of pipelines to search with. To provide a custom set instead, set allowed_component_graphs to a dictionary of custom component graphs. `AutoMLSearch` will use these to generate `Pipeline` instances. Note: this will prevent AutoML from generating other pipelines to search over.
###Code
from evalml.pipelines import MulticlassClassificationPipeline
automl_custom = evalml.automl.AutoMLSearch(
X_train=X_train,
y_train=y_train,
problem_type='multiclass',
verbose=True,
allowed_component_graphs={"My_pipeline": ['Simple Imputer', 'Random Forest Classifier'],
"My_other_pipeline": ['One Hot Encoder', 'Random Forest Classifier']})
###Output
_____no_output_____
###Markdown
Stopping the search earlyTo stop the search early, hit `Ctrl-C`. This will bring up a prompt asking for confirmation. Responding with `y` will immediately stop the search. Responding with `n` will continue the search. Callback functions``AutoMLSearch`` supports several callback functions, which can be specified as parameters when initializing an ``AutoMLSearch`` object. They are:- ``start_iteration_callback``- ``add_result_callback``- ``error_callback`` Start Iteration CallbackUsers can set ``start_iteration_callback`` to set what function is called before each pipeline training iteration. This callback function must take three positional parameters: the pipeline class, the pipeline parameters, and the ``AutoMLSearch`` object.
###Code
## start_iteration_callback example function
def start_iteration_callback_example(pipeline_class, pipeline_params, automl_obj):
print ("Training pipeline with the following parameters:", pipeline_params)
###Output
_____no_output_____
###Markdown
Add Result CallbackUsers can set ``add_result_callback`` to set what function is called after each pipeline training iteration. This callback function must take three positional parameters: a dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the ``AutoMLSearch`` object.
###Code
## add_result_callback example function
def add_result_callback_example(pipeline_results_dict, untrained_pipeline, automl_obj):
print ("Results for trained pipeline with the following parameters:", pipeline_results_dict)
###Output
_____no_output_____
###Markdown
Error CallbackUsers can set the ``error_callback`` to set what function is called when `search()` errors and raises an ``Exception``. This callback function takes three positional parameters: the ``Exception`` raised, the traceback, and the ``AutoMLSearch`` object. This callback function must also accept ``kwargs``, so ``AutoMLSearch`` is able to pass along other parameters used by default.Evalml defines several error callback functions, which can be found under `evalml.automl.callbacks`. They are:- `silent_error_callback`- `raise_error_callback`- `log_and_save_error_callback`- `raise_and_save_error_callback`- `log_error_callback` (default used when ``error_callback`` is None)
###Code
# error_callback example; this is implemented in the evalml library
def raise_error_callback(exception, traceback, automl, **kwargs):
"""Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
logger.error(f'AutoMLSearch raised a fatal exception: {str(exception)}')
logger.error("\n".join(traceback))
raise exception
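# Usage sketch (hypothetical object name): pass one of the predefined callbacks, or your own, via error_callback, e.g.
# from evalml.automl.callbacks import raise_error_callback
# automl_strict = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary',
#                                            error_callback=raise_error_callback)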
###Output
_____no_output_____
###Markdown
View RankingsA summary of all the pipelines built can be returned as a pandas DataFrame which is sorted by score. The score column contains the average score across all cross-validation folds while the validation_score column is computed from the first cross-validation fold.
###Code
automl.rankings
###Output
_____no_output_____
###Markdown
Describe PipelineEach pipeline is given an `id`. We can get more information about any particular pipeline using that `id`. Here, we will get more information about the pipeline with `id = 1`.
###Code
automl.describe_pipeline(1)
###Output
_____no_output_____
###Markdown
Get PipelineWe can get the object of any pipeline via their `id` as well:
###Code
pipeline = automl.get_pipeline(1)
print(pipeline.name)
print(pipeline.parameters)
###Output
_____no_output_____
###Markdown
Get best pipelineIf you specifically want to get the best pipeline, there is a convenient accessor for that.The pipeline returned is already fitted on the input X, y data that we passed to AutoMLSearch. To turn off this default behavior, set `train_best_pipeline=False` when initializing AutoMLSearch.
###Code
best_pipeline = automl.best_pipeline
print(best_pipeline.name)
print(best_pipeline.parameters)
best_pipeline.predict(X_train)
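# Sketch (hypothetical object name): to skip fitting the best pipeline automatically, pass train_best_pipeline=False, e.g.
# automl_no_fit = evalml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary',
#                                            train_best_pipeline=False)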
###Output
_____no_output_____
###Markdown
Training and Scoring Multiple Pipelines using AutoMLSearchAutoMLSearch will automatically fit the best pipeline on the entire training data. It also provides an easy API for training and scoring other pipelines.If you'd like to train one or more pipelines on the entire training data, you can use the `train_pipelines` method. Similarly, if you'd like to score one or more pipelines on a particular dataset, you can use the `score_pipelines` method.
###Code
trained_pipelines = automl.train_pipelines([automl.get_pipeline(i) for i in [0, 1, 2]])
trained_pipelines
pipeline_holdout_scores = automl.score_pipelines([trained_pipelines[name] for name in trained_pipelines.keys()],
X_holdout,
y_holdout,
['Accuracy Binary', 'F1', 'AUC'])
pipeline_holdout_scores
###Output
_____no_output_____
###Markdown
Saving AutoMLSearch and pipelines from AutoMLSearchThere are two ways to save results from AutoMLSearch. - You can save the AutoMLSearch object itself, calling `.save()` to do so. This will allow you to save the AutoMLSearch state and reload all pipelines from this.- If you want to save a pipeline from AutoMLSearch for future use, pipeline classes themselves have a `.save()` method.
###Code
# saving the entire automl search
automl.save("automl.cloudpickle")
automl2 = evalml.automl.AutoMLSearch.load("automl.cloudpickle")
# saving the best pipeline using .save()
best_pipeline.save("pipeline.cloudpickle")
best_pipeline_copy = evalml.pipelines.PipelineBase.load("pipeline.cloudpickle")
###Output
_____no_output_____
###Markdown
Limiting the AutoML Search SpaceThe AutoML search algorithm first trains each component in the pipeline with their default values. After the first iteration, it then tweaks the parameters of these components using the pre-defined hyperparameter ranges that these components have. To limit the search over certain hyperparameter ranges, you can specify a `custom_hyperparameters` argument with your `AutoMLSearch` parameters. These parameters will limit the hyperparameter search space. Hyperparameter ranges can be found through the [API reference](https://evalml.alteryx.com/en/stable/api_reference.html) for each component. Parameter arguments must be specified as dictionaries, but the associated values can be single values or `skopt.space` Real, Integer, Categorical values.If however you'd like to specify certain values for the initial batch of the AutoML search algorithm, you can use the `pipeline_parameters` argument. This will set the initial batch's component parameters to the values passed by this argument.
###Code
from evalml import AutoMLSearch
from evalml.demos import load_fraud
from skopt.space import Categorical
from evalml.model_family import ModelFamily
import woodwork as ww
X, y = load_fraud(n_rows=1000)
# example of setting parameter to just one value
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': 'mean'
}}
# limit the numeric impute strategy to include only `median` and `most_frequent`
# `mean` is the default value for this argument, but it doesn't need to be included in the specified hyperparameter range for this to work
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': Categorical(['median', 'most_frequent'])
}}
# set the initial batch numeric impute strategy strategy to 'median'
pipeline_parameters = {'Imputer': {
'numeric_impute_strategy': 'median'
}}
# using this custom hyperparameter means that our Imputer components in these pipelines will only search through
# 'median' and 'most_frequent' strategies for 'numeric_impute_strategy', and the initial batch parameter will be
# set to 'median'
automl_constrained = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
pipeline_parameters=pipeline_parameters,
custom_hyperparameters=custom_hyperparameters,
verbose=True)
###Output
_____no_output_____
###Markdown
Imbalanced DataThe AutoML search algorithm now has functionality to handle imbalanced data during classification! AutoMLSearch now provides two additional parameters, `sampler_method` and `sampler_balanced_ratio`, that allow you to let AutoMLSearch know whether to sample imbalanced data, and how to do so. `sampler_method` takes in either `Undersampler`, `Oversampler`, `auto`, or None as the sampler to use, and `sampler_balanced_ratio` specifies the `minority/majority` ratio that you want to sample to. Details on the Undersampler and Oversampler components can be found in the [documentation](https://evalml.alteryx.com/en/stable/api_reference.html#transformers).This can be used for imbalanced datasets, like the fraud dataset, which has a 'minority:majority' ratio of < 0.2.
###Code
automl_auto = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', automl_algorithm='iterative')
automl_auto.allowed_pipelines[-1]
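# Sketch (hypothetical object name): sampler_method can also be set explicitly, e.g. to force undersampling
# automl_under = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', sampler_method='Undersampler')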
###Output
_____no_output_____
###Markdown
The Oversampler is chosen as the default sampling component here, since the `sampler_balanced_ratio = 0.25`. If you specified a lower ratio, for instance `sampler_balanced_ratio = 0.1`, then there would be no sampling component added here. This is because if a ratio of 0.1 would be considered balanced, then a ratio of 0.2 would also be balanced.The Oversampler uses SMOTE under the hood, and automatically selects whether to use SMOTE, SMOTEN, or SMOTENC based on the data it receives.
###Code
automl_auto_ratio = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', sampler_balanced_ratio=0.1, automl_algorithm='iterative')
automl_auto_ratio.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
Additionally, you can add more fine-grained sampling ratios by passing in a `sampling_ratio_dict` in pipeline parameters. For this dictionary, AutoMLSearch expects the keys to be int values from 0 to `n-1` for the classes, and the values would be the `sampler_balanced_ratio` associated with each target. This dictionary would override the AutoML argument `sampler_balanced_ratio`. Below, you can see the scenario for the Oversampler component on this dataset. Note that the logic for Undersamplers is included in the commented section.
###Code
# In this case, the majority class is the negative class
# for the oversampler, we don't want to oversample this class, so class 0 (majority) will have a ratio of 1 to itself
# for the minority class 1, we want to oversample it to have a minority/majority ratio of 0.5, which means we want the minority class to have half as many samples as the majority
sampler_ratio_dict = {0: 1, 1: 0.5}
pipeline_parameters = {"Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters, automl_algorithm='iterative')
automl_auto_ratio_dict.allowed_pipelines[-1]
# Undersampler case
# we don't want to undersample this class, so class 1 (minority) will have a ratio of 1 to itself
# for the majority class 0, we want to undersample it to have a minority/majority ratio of 0.5, which means we want majority to have 2x the samples as the minority
# sampler_ratio_dict = {0: 0.5, 1: 1}
# pipeline_parameters = {"Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
# automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters)
###Output
_____no_output_____
###Markdown
Adding ensemble methods to AutoML Stacking[Stacking](https://en.wikipedia.org/wiki/Ensemble_learning#Stacking) is an ensemble machine learning algorithm that involves training a model to best combine the predictions of several base learning algorithms. First, each base learning algorithm is trained using the given data. Then, the combining algorithm or meta-learner is trained on the predictions made by those base learning algorithms to make a final prediction.AutoML enables stacking using the `ensembling` flag during initialization; this is set to `False` by default. The stacking ensemble pipeline runs in its own batch after a whole cycle of training has occurred (each allowed pipeline trains for one batch). Note that this means __a large number of iterations may need to run before the stacking ensemble runs__. It is also important to note that __only the first CV fold is calculated for stacking ensembles__ because the model internally uses CV folds.
###Code
X, y = evalml.demos.load_breast_cancer()
automl_with_ensembling = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
max_batches=4,
ensembling=True,
automl_algorithm='iterative',
verbose=True)
automl_with_ensembling.search()
###Output
_____no_output_____
###Markdown
We can view more information about the stacking ensemble pipeline (which was the best performing pipeline) by calling `.describe()`.
###Code
automl_with_ensembling.best_pipeline.describe()
###Output
_____no_output_____
###Markdown
Access raw resultsThe `AutoMLSearch` class records detailed results information under the `results` field, including information about the cross-validation scoring and parameters.
###Code
import pprint
pp = pprint.PrettyPrinter(indent=0, width=100, depth=3, compact=True, sort_dicts=False)
pp.pprint(automl.results)
###Output
_____no_output_____
###Markdown
Parallel AutoMLBy default, all pipelines in an AutoML batch are evaluated in series. Pipelines can be evaluated in parallel to improve performance during AutoML search. This is accomplished by a futures style submission and evaluation of pipelines in a batch. As of this writing, the pipelines use a threaded model for concurrent evaluation. This is similar to the currently implemented `n_jobs` parameter in the estimators, which uses increased numbers of threads to train and evaluate estimators. Quick StartTo quickly use some parallelism to enhance the pipeline searching, a string can be passed through to AutoMLSearch during initialization to setup the parallel engine and client within the AutoMLSearch object. The current options are "cf_threaded", "cf_process", "dask_threaded" and "dask_process" and indicate the futures backend to use and whether to use threaded- or process-level parallelism.
###Code
automl_cf_threaded = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine="cf_threaded")
automl_cf_threaded.search(show_iteration_plot = False)
automl_cf_threaded.close_engine()
###Output
_____no_output_____
###Markdown
Parallelism with Concurrent FuturesThe `EngineBase` class is robust and extensible enough to support futures-like implementations from a variety of libraries. The `CFEngine` extends the `EngineBase` to use the native Python [concurrent.futures library](https://docs.python.org/3/library/concurrent.futures.html). The `CFEngine` supports both thread- and process-level parallelism. The type of parallelism can be chosen using either the `ThreadPoolExecutor` or the `ProcessPoolExecutor`. If either executor is passed a `max_workers` parameter, it will set the number of processes and threads spawned. If not, the default number of processes will be equal to the number of processors available and the number of threads set to five times the number of processors available.Here, the `CFEngine` is invoked with a `ThreadPoolExecutor`, which provides thread-level parallelism.
###Code
from concurrent.futures import ThreadPoolExecutor
from evalml.automl.engine.cf_engine import CFEngine, CFClient
cf_engine = CFEngine(CFClient(ThreadPoolExecutor(max_workers=4)))
automl_cf_threaded = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=cf_engine)
automl_cf_threaded.search(show_iteration_plot = False)
automl_cf_threaded.close_engine()
###Output
_____no_output_____
###Markdown
Note: the cell demonstrating process-level parallelism is a markdown cell due to incompatibility with our ReadTheDocs build. It can be run successfully locally.

```python
from concurrent.futures import ProcessPoolExecutor

# Repeat the process but using process-level parallelism
cf_engine = CFEngine(CFClient(ProcessPoolExecutor(max_workers=2)))
automl_cf_process = AutoMLSearch(X_train=X, y_train=y,
                                 problem_type="binary",
                                 engine="cf_process")
automl_cf_process.search(show_iteration_plot = False)
automl_cf_process.close_engine()
```

Parallelism with DaskThread or process-level parallelism can be explicitly invoked for the `DaskEngine` (as well as the `CFEngine`). The `processes` parameter can be set to `True` and the number of processes set using `n_workers`. If `processes` is set to `False`, then the resulting parallelism will be threaded and `n_workers` will represent the threads used. Examples of both follow.
###Code
from dask.distributed import LocalCluster
from evalml.automl.engine import DaskEngine
dask_engine_p2 = DaskEngine(cluster=LocalCluster(processes=True, n_workers = 2))
automl_dask_p2 = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=dask_engine_p2)
automl_dask_p2.search(show_iteration_plot = False)
# Explicitly shutdown the automl object's LocalCluster
automl_dask_p2.close_engine()
dask_engine_t4 = DaskEngine(cluster=LocalCluster(processes=False, n_workers = 4))
automl_dask_t4 = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=dask_engine_t4)
automl_dask_t4.search(show_iteration_plot = False)
automl_dask_t4.close_engine()
###Output
_____no_output_____
###Markdown
As we can see, a significant performance gain can result from simply using something other than the default `SequentialEngine`, ranging from a 100% speed up with multiple processes to 500% speedup with multiple threads!
###Code
print("Sequential search duration: %s" % str(automl.search_duration))
print("Concurrent futures (threaded) search duration: %s" % str(automl_cf_threaded.search_duration))
print("Dask (two processes) search duration: %s" % str(automl_dask_p2.search_duration))
print("Dask (four threads)search duration: %s" % str(automl_dask_t4.search_duration))
###Output
_____no_output_____
###Markdown
Automated Machine Learning (AutoML) Search Background Machine Learning[Machine learning](https://en.wikipedia.org/wiki/Machine_learning) (ML) is the process of constructing a mathematical model of a system based on a sample dataset collected from that system.One of the main goals of training an ML model is to teach the model to separate the signal present in the data from the noise inherent in the system and in the data collection process. If this is done effectively, the model can then be used to make accurate predictions about the system when presented with new, similar data. Additionally, introspecting on an ML model can reveal key information about the system being modeled, such as which inputs and transformations of the inputs are most useful to the ML model for learning the signal in the data, and are therefore the most predictive.There are [a variety](https://en.wikipedia.org/wiki/Machine_learning#Approaches) of ML problem types. Supervised learning describes the case where the collected data contains an output value to be modeled and a set of inputs with which to train the model. rayml focuses on training supervised learning models.rayml supports three common supervised ML problem types. The first is regression, where the target value to model is a continuous numeric value. Next are binary and multiclass classification, where the target value to model consists of two or more discrete values or categories. The choice of which supervised ML problem type is most appropriate depends on domain expertise and on how the model will be evaluated and used. rayml is currently building support for supervised time series problems: time series regression, time series binary classification, and time series multiclass classification. While we've added some features to tackle these kinds of problems, our functionality is still being actively developed so please be mindful of that before using it. AutoML and Search[AutoML](https://en.wikipedia.org/wiki/Automated_machine_learning) is the process of automating the construction, training and evaluation of ML models. Given a dataset and some configuration, AutoML searches for the most effective and accurate ML model or models to fit the dataset. During the search, AutoML will explore different combinations of model type, model parameters and model architecture.An effective AutoML solution offers several advantages over constructing and tuning ML models by hand. AutoML can assist with many of the difficult aspects of ML, such as avoiding overfitting and underfitting, imbalanced data, detecting data leakage and other potential issues with the problem setup, and automatically applying best-practice data cleaning, feature engineering, feature selection and various modeling techniques. AutoML can also leverage search algorithms to optimally sweep the hyperparameter search space, resulting in model performance which would be difficult to achieve by manual training. AutoML in raymlrayml supports all of the above and more.In its simplest usage, the AutoML search interface requires only the input data, the target data and a `problem_type` specifying what kind of supervised ML problem to model.** Graphing methods, like verbose AutoMLSearch, on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed.** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/jupyterlab-support-python-35) is required. 
To download this, make sure you have [npm](https://nodejs.org/en/download/) installed.
###Code
import rayml
from rayml.utils import infer_feature_types
X, y = rayml.demos.load_fraud(n_rows=250)
###Output
_____no_output_____
###Markdown
To provide data to rayml, it is recommended that you initialize a [Woodwork accessor](https://woodwork.alteryx.com/en/stable/) on your data. This allows you to easily control how rayml will treat each of your features before training a model.rayml also accepts ``pandas`` input, and will run type inference on top of the input ``pandas`` data. If you'd like to change the types inferred by rayml, you can use the `infer_feature_types` utility method, which takes pandas or numpy input and converts it to a Woodwork data structure. The `feature_types` parameter can be used to specify what types specific columns should be.Feature types such as `Natural Language` must be specified in this way, otherwise Woodwork will infer it as `Unknown` type and drop it during the AutoMLSearch.In the example below, we reformat a couple features to make them easily consumable by the model, and then specify that the provider, which would have otherwise been inferred as a column with natural language, is a categorical column.
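For instance, a tiny sketch of declaring a natural-language column explicitly (the column name and data here are hypothetical and used only for illustration; the Woodwork type string is assumed to be `'NaturalLanguage'`):

```python
import pandas as pd
from rayml.utils import infer_feature_types

# Hypothetical free-text column, shown only to illustrate declaring NaturalLanguage explicitly
notes = pd.DataFrame({'transaction_notes': ['card declined twice', 'routine purchase']})
notes = infer_feature_types(notes, feature_types={'transaction_notes': 'NaturalLanguage'})
```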
###Code
X.ww['expiration_date'] = X['expiration_date'].apply(lambda x: '20{}-01-{}'.format(x.split("/")[1], x.split("/")[0]))
X = infer_feature_types(X, feature_types= {'store_id': 'categorical',
'expiration_date': 'datetime',
'lat': 'categorical',
'lng': 'categorical',
'provider': 'categorical'})
###Output
_____no_output_____
###Markdown
In order to validate the results of the pipeline creation and optimization process, we will save some of our data as a holdout set.
###Code
X_train, X_holdout, y_train, y_holdout = rayml.preprocessing.split_data(X, y, problem_type='binary', test_size=.2)
###Output
_____no_output_____
###Markdown
Data ChecksBefore calling `AutoMLSearch.search`, we should run some sanity checks on our data to ensure that the input data being passed will not run into some common issues before running a potentially time-consuming search. rayml has various data checks that make this easy. Each data check will return a collection of warnings and errors if it detects potential issues with the input data. This allows users to inspect their data to avoid confusing errors that may arise during the search process. You can learn about each of the data checks available through our [data checks guide](data_checks.ipynb). Here, we will run the `DefaultDataChecks` class, which contains a series of data checks that are generally useful.
###Code
from rayml.data_checks import DefaultDataChecks
data_checks = DefaultDataChecks("binary", "log loss binary")
data_checks.validate(X_train, y_train)
###Output
_____no_output_____
###Markdown
Since there were no warnings or errors returned, we can safely continue with the search process.
###Code
automl = rayml.automl.AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary', verbose=True)
automl.search()
###Output
_____no_output_____
###Markdown
With the `verbose` argument set to True, the AutoML search will log its progress, reporting each pipeline and parameter set evaluated during the search.There are a number of mechanisms to control the AutoML search time. One way is to set the `max_batches` parameter which controls the maximum number of rounds of AutoML to evaluate, where each round may train and score a variable number of pipelines. Another way is to set the `max_iterations` parameter which controls the maximum number of candidate models to be evaluated during AutoML. By default, AutoML will search for a single batch. The first pipeline to be evaluated will always be a baseline model representing a trivial solution. The AutoML interface supports a variety of other parameters. For a comprehensive list, please [refer to the API reference.](../autoapi/rayml/automl/index.rstrayml.automl.AutoMLSearch) We also provide [a standalone search method](../autoapi/rayml/automl/index.rstrayml.automl.search) which does all of the above in a single line, and returns the `AutoMLSearch` instance and data check results. If there were data check errors, AutoML will not be run and no `AutoMLSearch` instance will be returned. Detecting Problem Typerayml includes a simple method, `detect_problem_type`, to help determine the problem type given the target data. This function can return the predicted problem type as a ProblemType enum, choosing from ProblemType.BINARY, ProblemType.MULTICLASS, and ProblemType.REGRESSION. If the target data is invalid (for instance when there is only 1 unique label), the function will throw an error instead.
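For example, a rough sketch of capping the search (parameter names as described above; not intended as canonical usage):

```python
# Cap the search at two batches; max_iterations could be used instead to cap
# the number of candidate pipelines evaluated.
automl_limited = rayml.automl.AutoMLSearch(X_train=X_train, y_train=y_train,
                                           problem_type='binary',
                                           max_batches=2)
automl_limited.search()
```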
###Code
import pandas as pd
from rayml.problem_types import detect_problem_type
y_binary = pd.Series([0, 1, 1, 0, 1, 1])
detect_problem_type(y_binary)
###Output
_____no_output_____
###Markdown
Objective parameterAutoMLSearch takes in an `objective` parameter to determine which `objective` to optimize for. By default, this parameter is set to `auto`, which allows AutoML to choose `LogLossBinary` for binary classification problems, `LogLossMulticlass` for multiclass classification problems, and `R2` for regression problems.It should be noted that the `objective` parameter is only used in ranking and helping choose the pipelines to iterate over, but is not used to optimize each individual pipeline during fit-time.To get the default objective for each problem type, you can use the `get_default_primary_search_objective` function.
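As a sketch, a specific objective can be passed instead of `auto` (the `'F1'` objective name is taken from the scoring example later in this notebook):

```python
# Rank binary pipelines by F1 instead of the default log loss objective
automl_f1 = rayml.automl.AutoMLSearch(X_train=X_train, y_train=y_train,
                                      problem_type='binary',
                                      objective='F1')
```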
###Code
from rayml.automl import get_default_primary_search_objective
binary_objective = get_default_primary_search_objective("binary")
multiclass_objective = get_default_primary_search_objective("multiclass")
regression_objective = get_default_primary_search_objective("regression")
print(binary_objective.name)
print(multiclass_objective.name)
print(regression_objective.name)
###Output
_____no_output_____
###Markdown
Using custom pipelinesrayml's AutoML algorithm generates a set of pipelines to search with. To provide a custom set instead, set allowed_component_graphs to a dictionary of custom component graphs. `AutoMLSearch` will use these to generate `Pipeline` instances. Note: this will prevent AutoML from generating other pipelines to search over.
###Code
from rayml.pipelines import MulticlassClassificationPipeline
automl_custom = rayml.automl.AutoMLSearch(
X_train=X_train,
y_train=y_train,
problem_type='multiclass',
verbose=True,
allowed_component_graphs={"My_pipeline": ['Simple Imputer', 'Random Forest Classifier'],
"My_other_pipeline": ['One Hot Encoder', 'Random Forest Classifier']})
###Output
_____no_output_____
###Markdown
Stopping the search earlyTo stop the search early, hit `Ctrl-C`. This will bring up a prompt asking for confirmation. Responding with `y` will immediately stop the search. Responding with `n` will continue the search. Callback functions``AutoMLSearch`` supports several callback functions, which can be specified as parameters when initializing an ``AutoMLSearch`` object. They are:- ``start_iteration_callback``- ``add_result_callback``- ``error_callback`` Start Iteration CallbackUsers can set ``start_iteration_callback`` to set what function is called before each pipeline training iteration. This callback function must take three positional parameters: the pipeline class, the pipeline parameters, and the ``AutoMLSearch`` object.
###Code
## start_iteration_callback example function
def start_iteration_callback_example(pipeline_class, pipeline_params, automl_obj):
print ("Training pipeline with the following parameters:", pipeline_params)
###Output
_____no_output_____
###Markdown
Add Result CallbackUsers can set ``add_result_callback`` to set what function is called after each pipeline training iteration. This callback function must take three positional parameters: a dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the ``AutoMLSearch`` object.
###Code
## add_result_callback example function
def add_result_callback_example(pipeline_results_dict, untrained_pipeline, automl_obj):
print ("Results for trained pipeline with the following parameters:", pipeline_results_dict)
###Output
_____no_output_____
###Markdown
Error CallbackUsers can set the ``error_callback`` to set what function is called when `search()` errors and raises an ``Exception``. This callback function takes three positional parameters: the ``Exception`` raised, the traceback, and the ``AutoMLSearch`` object. This callback function must also accept ``kwargs``, so ``AutoMLSearch`` is able to pass along other parameters used by default.rayml defines several error callback functions, which can be found under `rayml.automl.callbacks`. They are:- `silent_error_callback`- `raise_error_callback`- `log_and_save_error_callback`- `raise_and_save_error_callback`- `log_error_callback` (default used when ``error_callback`` is None)
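A minimal sketch of wiring these callbacks in at initialization, reusing the example functions defined above and one of the built-in error callbacks (illustrative only, not canonical usage):

```python
from rayml.automl.callbacks import log_error_callback

automl_cb = rayml.automl.AutoMLSearch(
    X_train=X_train, y_train=y_train, problem_type='binary',
    start_iteration_callback=start_iteration_callback_example,
    add_result_callback=add_result_callback_example,
    error_callback=log_error_callback)
```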
###Code
# error_callback example; this is implemented in the rayml library
def raise_error_callback(exception, traceback, automl, **kwargs):
"""Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
logger.error(f'AutoMLSearch raised a fatal exception: {str(exception)}')
logger.error("\n".join(traceback))
raise exception
###Output
_____no_output_____
###Markdown
View RankingsA summary of all the pipelines built can be returned as a pandas DataFrame which is sorted by score. The score column contains the average score across all cross-validation folds while the validation_score column is computed from the first cross-validation fold.
###Code
automl.rankings
###Output
_____no_output_____
###Markdown
Describe PipelineEach pipeline is given an `id`. We can get more information about any particular pipeline using that `id`. Here, we will get more information about the pipeline with `id = 1`.
###Code
automl.describe_pipeline(1)
###Output
_____no_output_____
###Markdown
Get PipelineWe can get the object of any pipeline via their `id` as well:
###Code
pipeline = automl.get_pipeline(1)
print(pipeline.name)
print(pipeline.parameters)
###Output
_____no_output_____
###Markdown
Get best pipelineIf you specifically want to get the best pipeline, there is a convenient accessor for that.The pipeline returned is already fitted on the input X, y data that we passed to AutoMLSearch. To turn off this default behavior, set `train_best_pipeline=False` when initializing AutoMLSearch.
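A quick sketch of opting out of that default behavior:

```python
# Skip automatically fitting the best pipeline at the end of the search
automl_untrained = rayml.automl.AutoMLSearch(X_train=X_train, y_train=y_train,
                                             problem_type='binary',
                                             train_best_pipeline=False)
```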
###Code
best_pipeline = automl.best_pipeline
print(best_pipeline.name)
print(best_pipeline.parameters)
best_pipeline.predict(X_train)
###Output
_____no_output_____
###Markdown
Training and Scoring Multiple Pipelines using AutoMLSearchAutoMLSearch will automatically fit the best pipeline on the entire training data. It also provides an easy API for training and scoring other pipelines.If you'd like to train one or more pipelines on the entire training data, you can use the `train_pipelines` method. Similarly, if you'd like to score one or more pipelines on a particular dataset, you can use the `score_pipelines` method.
###Code
trained_pipelines = automl.train_pipelines([automl.get_pipeline(i) for i in [0, 1, 2]])
trained_pipelines
pipeline_holdout_scores = automl.score_pipelines([trained_pipelines[name] for name in trained_pipelines.keys()],
X_holdout,
y_holdout,
['Accuracy Binary', 'F1', 'AUC'])
pipeline_holdout_scores
###Output
_____no_output_____
###Markdown
Saving AutoMLSearch and pipelines from AutoMLSearchThere are two ways to save results from AutoMLSearch. - You can save the AutoMLSearch object itself, calling `.save()` to do so. This will allow you to save the AutoMLSearch state and reload all pipelines from this.- If you want to save a pipeline from AutoMLSearch for future use, pipeline classes themselves have a `.save()` method.
###Code
# saving the entire automl search
automl.save("automl.cloudpickle")
automl2 = rayml.automl.AutoMLSearch.load("automl.cloudpickle")
# saving the best pipeline using .save()
best_pipeline.save("pipeline.cloudpickle")
best_pipeline_copy = rayml.pipelines.PipelineBase.load("pipeline.cloudpickle")
###Output
_____no_output_____
###Markdown
Limiting the AutoML Search SpaceThe AutoML search algorithm first trains each component in the pipeline with their default values. After the first iteration, it then tweaks the parameters of these components using the pre-defined hyperparameter ranges that these components have. To limit the search over certain hyperparameter ranges, you can specify a `custom_hyperparameters` argument with your `AutoMLSearch` parameters. These parameters will limit the hyperparameter search space. Hyperparameter ranges can be found through the [API reference](https://rayml.alteryx.com/en/stable/api_reference.html) for each component. Parameter arguments must be specified as dictionaries, but the associated values can be single values or `skopt.space` Real, Integer, Categorical values.If however you'd like to specify certain values for the initial batch of the AutoML search algorithm, you can use the `pipeline_parameters` argument. This will set the initial batch's component parameters to the values passed by this argument.
###Code
from rayml import AutoMLSearch
from rayml.demos import load_fraud
from skopt.space import Categorical
from rayml.model_family import ModelFamily
import woodwork as ww
X, y = load_fraud(n_rows=1000)
# example of setting parameter to just one value
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': 'mean'
}}
# limit the numeric impute strategy to include only `median` and `most_frequent`
# `mean` is the default value for this argument, but it doesn't need to be included in the specified hyperparameter range for this to work
custom_hyperparameters = {'Imputer': {
'numeric_impute_strategy': Categorical(['median', 'most_frequent'])
}}
# set the initial batch numeric impute strategy strategy to 'median'
pipeline_parameters = {'Imputer': {
'numeric_impute_strategy': 'median'
}}
# using this custom hyperparameter means that our Imputer components in these pipelines will only search through
# 'median' and 'most_frequent' strategies for 'numeric_impute_strategy', and the initial batch parameter will be
# set to 'median'
automl_constrained = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
pipeline_parameters=pipeline_parameters,
custom_hyperparameters=custom_hyperparameters,
verbose=True)
###Output
_____no_output_____
###Markdown
Imbalanced DataThe AutoML search algorithm now has functionality to handle imbalanced data during classification! AutoMLSearch now provides two additional parameters, `sampler_method` and `sampler_balanced_ratio`, that allow you to let AutoMLSearch know whether to sample imbalanced data, and how to do so. `sampler_method` takes in either `Undersampler`, `Oversampler`, `auto`, or None as the sampler to use, and `sampler_balanced_ratio` specifies the `minority/majority` ratio that you want to sample to. Details on the Undersampler and Oversampler components can be found in the [documentation](https://rayml.alteryx.com/en/stable/api_reference.htmltransformers).This can be used for imbalanced datasets, like the fraud dataset, which has a 'minority:majority' ratio of < 0.2.
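As a sketch, both knobs can also be set explicitly (parameter and component names as described above):

```python
# Request undersampling down to a 0.5 minority:majority ratio instead of the defaults
automl_undersampled = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
                                   sampler_method='Undersampler',
                                   sampler_balanced_ratio=0.5)
```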
###Code
automl_auto = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', automl_algorithm='iterative')
automl_auto.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
The Oversampler is chosen as the default sampling component here, since the `sampler_balanced_ratio = 0.25`. If you specified a lower ratio, for instance `sampler_balanced_ratio = 0.1`, then there would be no sampling component added here. This is because if a ratio of 0.1 would be considered balanced, then a ratio of 0.2 would also be balanced.The Oversampler uses SMOTE under the hood, and automatically selects whether to use SMOTE, SMOTEN, or SMOTENC based on the data it receives.
###Code
automl_auto_ratio = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', sampler_balanced_ratio=0.1, automl_algorithm='iterative')
automl_auto_ratio.allowed_pipelines[-1]
###Output
_____no_output_____
###Markdown
Additionally, you can add more fine-grained sampling ratios by passing in a `sampling_ratio_dict` in pipeline parameters. For this dictionary, AutoMLSearch expects the keys to be int values from 0 to `n-1` for the classes, and the values would be the `sampler_balanced_ratio` associated with each target. This dictionary would override the AutoML argument `sampler_balanced_ratio`. Below, you can see the scenario for the Oversampler component on this dataset. Note that the logic for Undersamplers is included in the commented section.
###Code
# In this case, the majority class is the negative class
# for the oversampler, we don't want to oversample this class, so class 0 (majority) will have a ratio of 1 to itself
# for the minority class 1, we want to oversample it to have a minority/majority ratio of 0.5, which means we want minority to have 1/2 the samples as the majority
sampler_ratio_dict = {0: 1, 1: 0.5}
pipeline_parameters = {"Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters, automl_algorithm='iterative')
automl_auto_ratio_dict.allowed_pipelines[-1]
# Undersampler case
# we don't want to undersample this class, so class 1 (minority) will have a ratio of 1 to itself
# for the majority class 0, we want to undersample it to have a minority/majority ratio of 0.5, which means we want majority to have 2x the samples as the minority
# sampler_ratio_dict = {0: 0.5, 1: 1}
# pipeline_parameters = {"Oversampler": {"sampler_balanced_ratio": sampler_ratio_dict}}
# automl_auto_ratio_dict = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_parameters)
###Output
_____no_output_____
###Markdown
Adding ensemble methods to AutoML Stacking[Stacking](https://en.wikipedia.org/wiki/Ensemble_learning#Stacking) is an ensemble machine learning algorithm that involves training a model to best combine the predictions of several base learning algorithms. First, each base learning algorithm is trained using the given data. Then, the combining algorithm or meta-learner is trained on the predictions made by those base learning algorithms to make a final prediction.AutoML enables stacking using the `ensembling` flag during initialization; this is set to `False` by default. The stacking ensemble pipeline runs in its own batch after a whole cycle of training has occurred (each allowed pipeline trains for one batch). Note that this means __a large number of iterations may need to run before the stacking ensemble runs__. It is also important to note that __only the first CV fold is calculated for stacking ensembles__ because the model internally uses CV folds.
###Code
X, y = rayml.demos.load_breast_cancer()
automl_with_ensembling = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
max_batches=4,
ensembling=True,
automl_algorithm='iterative',
verbose=True)
automl_with_ensembling.search()
###Output
_____no_output_____
###Markdown
We can view more information about the stacking ensemble pipeline (which was the best performing pipeline) by calling `.describe()`.
###Code
automl_with_ensembling.best_pipeline.describe()
###Output
_____no_output_____
###Markdown
Access raw resultsThe `AutoMLSearch` class records detailed results information under the `results` field, including information about the cross-validation scoring and parameters.
###Code
import pprint
pp = pprint.PrettyPrinter(indent=0, width=100, depth=3, compact=True, sort_dicts=False)
pp.pprint(automl.results)
###Output
_____no_output_____
###Markdown
Parallel AutoMLBy default, all pipelines in an AutoML batch are evaluated in series. Pipelines can be evaluated in parallel to improve performance during AutoML search. This is accomplished by a futures style submission and evaluation of pipelines in a batch. As of this writing, the pipelines use a threaded model for concurrent evaluation. This is similar to the currently implemented `n_jobs` parameter in the estimators, which uses increased numbers of threads to train and evaluate estimators. Quick StartTo quickly use some parallelism to enhance the pipeline searching, a string can be passed through to AutoMLSearch during initialization to setup the parallel engine and client within the AutoMLSearch object. The current options are "cf_threaded", "cf_process", "dask_threaded" and "dask_process" and indicate the futures backend to use and whether to use threaded- or process-level parallelism.
###Code
automl_cf_threaded = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine="cf_threaded")
automl_cf_threaded.search(show_iteration_plot = False)
automl_cf_threaded.close_engine()
###Output
_____no_output_____
###Markdown
Parallelism with Concurrent FuturesThe `EngineBase` class is robust and extensible enough to support futures-like implementations from a variety of libraries. The `CFEngine` extends the `EngineBase` to use the native Python [concurrent.futures library](https://docs.python.org/3/library/concurrent.futures.html). The `CFEngine` supports both thread- and process-level parallelism. The type of parallelism can be chosen using either the `ThreadPoolExecutor` or the `ProcessPoolExecutor`. If either executor is passed a `max_workers` parameter, it will set the number of processes and threads spawned. If not, the default number of processes will be equal to the number of processors available and the number of threads set to five times the number of processors available.Here, the CFEngine is invoked with default parameters, which is threaded parallelism using all available threads.
###Code
from concurrent.futures import ThreadPoolExecutor
from rayml.automl.engine.cf_engine import CFEngine, CFClient
cf_engine = CFEngine(CFClient(ThreadPoolExecutor(max_workers=4)))
automl_cf_threaded = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=cf_engine)
automl_cf_threaded.search(show_iteration_plot = False)
automl_cf_threaded.close_engine()
###Output
_____no_output_____
###Markdown
Note: the cell demonstrating process-level parallelism is a markdown due to incompatibility with our ReadTheDocs build. It can be run successfully locally.

```python
from concurrent.futures import ProcessPoolExecutor

# Repeat the process but using process-level parallelism
cf_engine = CFEngine(CFClient(ProcessPoolExecutor(max_workers=2)))
automl_cf_process = AutoMLSearch(X_train=X, y_train=y,
                                 problem_type="binary",
                                 engine="cf_process")
automl_cf_process.search(show_iteration_plot = False)
automl_cf_process.close_engine()
```

Parallelism with DaskThread or process level parallelism can be explicitly invoked for the `DaskEngine` (as well as the `CFEngine`). The `processes` can be set to `True` and the number of processes set using `n_workers`. If `processes` is set to `False`, then the resulting parallelism will be threaded and `n_workers` will represent the threads used. Examples of both follow.
###Code
from dask.distributed import LocalCluster
from rayml.automl.engine import DaskEngine
dask_engine_p2 = DaskEngine(cluster=LocalCluster(processes=True, n_workers = 2))
automl_dask_p2 = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=dask_engine_p2)
automl_dask_p2.search(show_iteration_plot = False)
# Explicitly shutdown the automl object's LocalCluster
automl_dask_p2.close_engine()
dask_engine_t4 = DaskEngine(cluster=LocalCluster(processes=False, n_workers = 4))
automl_dask_t4 = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.LINEAR_MODEL],
engine=dask_engine_t4)
automl_dask_t4.search(show_iteration_plot = False)
automl_dask_t4.close_engine()
###Output
_____no_output_____
###Markdown
As we can see, a significant performance gain can result from simply using something other than the default `SequentialEngine`, ranging from a 100% speedup with multiple processes to a 500% speedup with multiple threads!
###Code
print("Sequential search duration: %s" % str(automl.search_duration))
print("Concurrent futures (threaded) search duration: %s" % str(automl_cf_threaded.search_duration))
print("Dask (two processes) search duration: %s" % str(automl_dask_p2.search_duration))
print("Dask (four threads)search duration: %s" % str(automl_dask_t4.search_duration))
###Output
_____no_output_____
###Markdown
Automated Machine Learning (AutoML) Search Background Machine Learning[Machine learning](https://en.wikipedia.org/wiki/Machine_learning) (ML) is the process of constructing a mathematical model of a system based on a sample dataset collected from that system.One of the main goals of training an ML model is to teach the model to separate the signal present in the data from the noise inherent in the system and in the data collection process. If this is done effectively, the model can then be used to make accurate predictions about the system when presented with new, similar data. Additionally, introspecting on an ML model can reveal key information about the system being modeled, such as which inputs and transformations of the inputs are most useful to the ML model for learning the signal in the data, and are therefore the most predictive.There are [a variety](https://en.wikipedia.org/wiki/Machine_learning#Approaches) of ML problem types. Supervised learning describes the case where the collected data contains an output value to be modeled and a set of inputs with which to train the model. EvalML focuses on training supervised learning models.EvalML supports three common supervised ML problem types. The first is regression, where the target value to model is a continuous numeric value. Next are binary and multiclass classification, where the target value to model consists of two or more discrete values or categories. The choice of which supervised ML problem type is most appropriate depends on domain expertise and on how the model will be evaluated and used. AutoML and Search[AutoML](https://en.wikipedia.org/wiki/Automated_machine_learning) is the process of automating the construction, training and evaluation of ML models. Given a dataset and some configuration, AutoML searches for the most effective and accurate ML model or models to fit the dataset. During the search, AutoML will explore different combinations of model type, model parameters and model architecture.An effective AutoML solution offers several advantages over constructing and tuning ML models by hand. AutoML can assist with many of the difficult aspects of ML, such as avoiding overfitting and underfitting, imbalanced data, detecting data leakage and other potential issues with the problem setup, and automatically applying best-practice data cleaning, feature engineering, feature selection and various modeling techniques. AutoML can also leverage search algorithms to optimally sweep the hyperparameter search space, resulting in model performance which would be difficult to achieve by manual training. AutoML in EvalMLEvalML supports all of the above and more.In its simplest usage, the AutoML search interface requires only the input data, the target data and a `problem_type` specifying what kind of supervised ML problem to model.** Graphing methods, like AutoMLSearch, on Jupyter Notebook and Jupyter Lab require [ipywidgets](https://ipywidgets.readthedocs.io/en/latest/user_install.html) to be installed.** If graphing on Jupyter Lab, [jupyterlab-plotly](https://plotly.com/python/getting-started/#jupyterlab-support-python-35) is required. To download this, make sure you have [npm](https://nodejs.org/en/download/) installed. __Note:__ To provide data to EvalML, it is recommended that you create a `DataTable` object using [the Woodwork project](https://woodwork.alteryx.com/en/stable/).EvalML also accepts ``pandas`` input, and will run type inference on top of the input ``pandas`` data.
If you'd like to change the types inferred by EvalML, you can use the `infer_feature_types` utility method as follows. The `infer_feature_types` utility method takes pandas or numpy input and converts it to a Woodwork data structure. It takes in a `feature_types` parameter which can be used to specify what types specific columns should be. In the example below, we specify that the provider, which would have otherwise been inferred as a column with natural language, is a categorical column.
###Code
import evalml
from evalml.utils import infer_feature_types
X, y = evalml.demos.load_fraud(n_rows=1000, return_pandas=True)
X = infer_feature_types(X, feature_types={'provider': 'categorical'})
###Output
_____no_output_____
###Markdown
Data ChecksBefore calling `AutoMLSearch.search`, we should run some sanity checks on our data to ensure that the input data being passed will not run into some common issues before running a potentially time-consuming search. EvalML has various data checks that make this easy. Each data check will return a collection of warnings and errors if it detects potential issues with the input data. This allows users to inspect their data to avoid confusing errors that may arise during the search process. You can learn about each of the data checks available through our [data checks guide](data_checks.ipynb). Here, we will run the `DefaultDataChecks` class, which contains a series of data checks that are generally useful.
###Code
from evalml.data_checks import DefaultDataChecks
data_checks = DefaultDataChecks("binary", "log loss binary")
data_checks.validate(X, y)
###Output
_____no_output_____
###Markdown
Since there were no warnings or errors returned, we can safely continue with the search process.
###Code
automl = evalml.automl.AutoMLSearch(X_train=X, y_train=y, problem_type='binary')
automl.search()
###Output
_____no_output_____
###Markdown
The AutoML search will log its progress, reporting each pipeline and parameter set evaluated during the search.There are a number of mechanisms to control the AutoML search time. One way is to set the `max_batches` parameter which controls the maximum number of rounds of AutoML to evaluate, where each round may train and score a variable number of pipelines. Another way is to set the `max_iterations` parameter which controls the maximum number of candidate models to be evaluated during AutoML. By default, AutoML will search for a single batch. The first pipeline to be evaluated will always be a baseline model representing a trivial solution. The AutoML interface supports a variety of other parameters. For a comprehensive list, please [refer to the API reference.](../generated/evalml.automl.AutoMLSearch.ipynb) Detecting Problem TypeEvalML includes a simple method, `detect_problem_type`, to help determine the problem type given the target data. This function can return the predicted problem type as a ProblemType enum, choosing from ProblemType.BINARY, ProblemType.MULTICLASS, and ProblemType.REGRESSION. If the target data is invalid (for instance when there is only 1 unique label), the function will throw an error instead.
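As a rough sketch (parameter names as described above), the search budget could be capped like this:

```python
# Evaluate at most 15 candidate pipelines; max_batches could be used instead
automl_limited = evalml.automl.AutoMLSearch(X_train=X, y_train=y,
                                            problem_type='binary',
                                            max_iterations=15)
automl_limited.search()
```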
###Code
import pandas as pd
from evalml.problem_types import detect_problem_type
y = pd.Series([0, 1, 1, 0, 1, 1])
detect_problem_type(y)
###Output
_____no_output_____
###Markdown
Objective parameterAutoMLSearch takes in an `objective` parameter to determine which `objective` to optimize for. By default, this parameter is set to `auto`, which allows AutoML to choose `LogLossBinary` for binary classification problems, `LogLossMulticlass` for multiclass classification problems, and `R2` for regression problems.It should be noted that the `objective` parameter is only used in ranking and helping choose the pipelines to iterate over, but is not used to optimize each individual pipeline during fit-time.To get the default objective for each problem type, you can use the `get_default_primary_search_objective` function.
###Code
from evalml.automl import get_default_primary_search_objective
binary_objective = get_default_primary_search_objective("binary")
multiclass_objective = get_default_primary_search_objective("multiclass")
regression_objective = get_default_primary_search_objective("regression")
print(binary_objective.name)
print(multiclass_objective.name)
print(regression_objective.name)
###Output
_____no_output_____
###Markdown
Using custom pipelinesEvalML's AutoML algorithm generates a set of pipelines to search with. To provide a custom set instead, set allowed_pipelines to a list of [custom pipeline](pipelines.ipynb) classes. Note: this will prevent AutoML from generating other pipelines to search over.
###Code
from evalml.pipelines import MulticlassClassificationPipeline
class CustomMulticlassClassificationPipeline(MulticlassClassificationPipeline):
component_graph = ['Simple Imputer', 'Random Forest Classifier']
automl_custom = evalml.automl.AutoMLSearch(X_train=X, y_train=y, problem_type='multiclass', allowed_pipelines=[CustomMulticlassClassificationPipeline])
###Output
_____no_output_____
###Markdown
Stopping the search earlyTo stop the search early, hit `Ctrl-C`. This will bring up a prompt asking for confirmation. Responding with `y` will immediately stop the search. Responding with `n` will continue the search. Callback functions``AutoMLSearch`` supports several callback functions, which can be specified as parameters when initializing an ``AutoMLSearch`` object. They are:- ``start_iteration_callback``- ``add_result_callback``- ``error_callback`` Start Iteration CallbackUsers can set ``start_iteration_callback`` to set what function is called before each pipeline training iteration. This callback function must take three positional parameters: the pipeline class, the pipeline parameters, and the ``AutoMLSearch`` object.
###Code
## start_iteration_callback example function
def start_iteration_callback_example(pipeline_class, pipeline_params, automl_obj):
print ("Training pipeline with the following parameters:", pipeline_params)
###Output
_____no_output_____
###Markdown
Add Result CallbackUsers can set ``add_result_callback`` to set what function is called after each pipeline training iteration. This callback function must take three positional parameters: a dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the ``AutoMLSearch`` object.
###Code
## add_result_callback example function
def add_result_callback_example(pipeline_results_dict, untrained_pipeline, automl_obj):
print ("Results for trained pipeline with the following parameters:", pipeline_results_dict)
###Output
_____no_output_____
###Markdown
Error CallbackUsers can set the ``error_callback`` to set what function is called when `search()` errors and raises an ``Exception``. This callback function takes three positional parameters: the ``Exception`` raised, the traceback, and the ``AutoMLSearch`` object. This callback function must also accept ``kwargs``, so ``AutoMLSearch`` is able to pass along other parameters used by default.EvalML defines several error callback functions, which can be found under `evalml.automl.callbacks`. They are:- `silent_error_callback`- `raise_error_callback`- `log_and_save_error_callback`- `raise_and_save_error_callback`- `log_error_callback` (default used when ``error_callback`` is None)
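A small sketch of passing one of these built-in callbacks at initialization (illustrative only; the fraud demo data is reloaded so the example is self-contained):

```python
from evalml.automl.callbacks import raise_error_callback

X_cb, y_cb = evalml.demos.load_fraud(n_rows=250)
automl_strict = evalml.automl.AutoMLSearch(X_train=X_cb, y_train=y_cb, problem_type='binary',
                                           error_callback=raise_error_callback)
```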
###Code
# error_callback example; this is implemented in the evalml library
def raise_error_callback(exception, traceback, automl, **kwargs):
"""Raises the exception thrown by the AutoMLSearch object. Also logs the exception as an error."""
logger.error(f'AutoMLSearch raised a fatal exception: {str(exception)}')
logger.error("\n".join(traceback))
raise exception
###Output
_____no_output_____
###Markdown
View RankingsA summary of all the pipelines built can be returned as a pandas DataFrame which is sorted by score. The score column contains the average score across all cross-validation folds while the validation_score column is computed from the first cross-validation fold.
###Code
automl.rankings
###Output
_____no_output_____
###Markdown
Describe PipelineEach pipeline is given an `id`. We can get more information about any particular pipeline using that `id`. Here, we will get more information about the pipeline with `id = 1`.
###Code
automl.describe_pipeline(1)
###Output
_____no_output_____
###Markdown
Get PipelineWe can get the object of any pipeline via their `id` as well:
###Code
pipeline = automl.get_pipeline(1)
print(pipeline.name)
print(pipeline.parameters)
###Output
_____no_output_____
###Markdown
Get best pipelineIf you specifically want to get the best pipeline, there is a convenient accessor for that.The pipeline returned is already fitted on the input X, y data that we passed to AutoMLSearch. To turn off this default behavior, set `train_best_pipeline=False` when initializing AutoMLSearch.
###Code
best_pipeline = automl.best_pipeline
print(best_pipeline.name)
print(best_pipeline.parameters)
best_pipeline.predict(X)
###Output
_____no_output_____
###Markdown
Saving AutoMLSearch and pipelines from AutoMLSearchThere are two ways to save results from AutoMLSearch. - You can save the AutoMLSearch object itself, calling `.save()` to do so. This will allow you to save the AutoMLSearch state and reload all pipelines from this.- If you want to save a pipeline from AutoMLSearch for future use, pipeline classes themselves have a `.save()` method.
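For the first option, a rough sketch (the `load` classmethod is assumed here as the counterpart of `.save()`, mirroring the pipeline `load` used below):

```python
# Save the whole AutoMLSearch object and reload it later
automl.save("automl_search_file_path")
automl_reloaded = evalml.automl.AutoMLSearch.load("automl_search_file_path")
```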
###Code
# saving the best pipeline using .save()
best_pipeline.save("file_path_here")
###Output
_____no_output_____
###Markdown
Limiting the AutoML Search SpaceThe AutoML search algorithm first trains each component in the pipeline with their default values. After the first iteration, it then tweaks the parameters of these components using the pre-defined hyperparameter ranges that these components have. To limit the search over certain hyperparameter ranges, you can specify a `pipeline_parameters` argument with your pipeline parameters. These parameters will also limit the hyperparameter search space. Hyperparameter ranges can be found through the [API reference](https://evalml.alteryx.com/en/stable/api_reference.html) for each component. Parameter arguments must be specified as dictionaries, but the associated values can be single values, list/tuples, or `skopt.space` Real, Integer, Categorical values.
###Code
from evalml import AutoMLSearch
from evalml.demos import load_fraud
from skopt.space import Categorical
from evalml.model_family import ModelFamily
import woodwork as ww
X, y = load_fraud(n_rows=1000)
# example of setting parameter to just one value
pipeline_hyperparameters = {'Imputer': {
'numeric_impute_strategy': 'mean'
}}
# limit the numeric impute strategy to include only `median` and `most_frequent`
# `mean` is the default value for this argument, but it doesn't need to be included in the specified hyperparameter range for this to work
pipeline_hyperparameters = {'Imputer': {
'numeric_impute_strategy': ['median', 'most_frequent']
}}
# example using skopt.space.Categorical
pipeline_hyperparameters = {'Imputer': {
'numeric_impute_strategy': Categorical(['median', 'most_frequent'])
}}
# using this pipeline parameter means that our Imputer components in the pipelines will only search through 'median' and 'most_frequent' strategies for 'numeric_impute_strategy'
automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary', pipeline_parameters=pipeline_hyperparameters)
automl.search()
automl.best_pipeline.hyperparameters
###Output
_____no_output_____
###Markdown
Access raw resultsThe `AutoMLSearch` class records detailed results information under the `results` field, including information about the cross-validation scoring and parameters.
###Code
automl.results
###Output
_____no_output_____
###Markdown
Adding ensemble methods to AutoML Stacking[Stacking](https://en.wikipedia.org/wiki/Ensemble_learning#Stacking) is an ensemble machine learning algorithm that involves training a model to best combine the predictions of several base learning algorithms. First, each base learning algorithm is trained using the given data. Then, the combining algorithm or meta-learner is trained on the predictions made by those base learning algorithms to make a final prediction.AutoML enables stacking using the `ensembling` flag during initialization; this is set to `False` by default. The stacking ensemble pipeline runs in its own batch after a whole cycle of training has occurred (each allowed pipeline trains for one batch). Note that this means __a large number of iterations may need to run before the stacking ensemble runs__. It is also important to note that __only the first CV fold is calculated for stacking ensembles__ because the model internally uses CV folds.
###Code
X, y = evalml.demos.load_breast_cancer()
automl_with_ensembling = AutoMLSearch(X_train=X, y_train=y,
problem_type="binary",
allowed_model_families=[ModelFamily.RANDOM_FOREST, ModelFamily.LINEAR_MODEL],
max_batches=5,
ensembling=True)
automl_with_ensembling.search()
###Output
_____no_output_____
###Markdown
We can view more information about the stacking ensemble pipeline (which was the best performing pipeline) by calling `.describe()`.
###Code
automl_with_ensembling.best_pipeline.describe()
###Output
_____no_output_____ |
Notebooks/02_Prepare_Dataset.ipynb | ###Markdown
Prepare Dataset by Alejandro Vega & Ian Flores Loading Dependencies
###Code
%matplotlib inline
from six.moves import cPickle as pickle
import os
import shutil
from PIL import Image, ImageOps
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display as disp # conflicting library function names
from IPython.display import Image as Im # conflicting library function names
from scipy import ndimage
import random
###Output
_____no_output_____
###Markdown
Cropping Spectrograms Given the architectures we are using in our models, we want all spectrograms to have the same size, because the models don't allow for dynamic size input.
###Code
def squareAndGrayImage(image, size, path, species, name):
# open our image and convert to grayscale
    # (needed since color channels add a third dimension)
    im = Image.open(image).convert('L')
    # dimensions of the square image
size = (size,size)
# resize our image and adjust if image is not square. save our image
squared_image = ImageOps.fit(im, size, Image.ANTIALIAS)
squared_image.save(path + '/' + species + '/squared_' + name)
#print(ndimage.imread(path + '/' + species + '/squared_' + name).shape)
def squareAndGrayProcess(size, dataset_path, new_dataset_path):
# if our dataset doesn't exist create it, otherwise overwrite
if not os.path.exists(new_dataset_path):
os.makedirs(new_dataset_path)
else:
shutil.rmtree(new_dataset_path)
os.makedirs(new_dataset_path)
# get a list of species folders in our dataset
species_dataset = os.listdir(dataset_path)
for species in species_dataset:
os.makedirs(new_dataset_path + '/' + species)
species_images = os.listdir(dataset_path + '/' + species)
for image in species_images:
image_path = dataset_path + '/' + species + '/' + image
squareAndGrayImage(image_path, size, new_dataset_path, species, image)
size = 224
dataset_path = '../dataset/spectrogram_roi_dataset'
new_dataset_path = '../dataset/squared_spectrogram_roi_dataset'
squareAndGrayProcess(size, dataset_path, new_dataset_path)
os.mkdir("../dataset/augmented_spectrograms")
from scipy.ndimage.interpolation import shift
## Have to find a way to create and copy the old directory #########
#To shift UP by up to 6 pixels
for folder in os.listdir(new_dataset_path):
species_pictures = os.listdir(new_dataset_path + '/' + folder)
os.makedirs('../dataset/augmented_spectrograms' + '/' + folder)
for image in species_pictures:
the_image = np.asarray(Image.open(new_dataset_path + '/' + folder + '/' + image))
for i in range(7):
pre_image = the_image.reshape((size,size))
shifted_image = shift(pre_image, [(i*(-1)), 0])
shifted_image = Image.fromarray(shifted_image)
shifted_image.save('../dataset/augmented_spectrograms/' + folder + '/' + 'shifted_up' + str(i) + '_' + image)
shifted_image.close()
#To shift down by up to 6 pixels
for folder in os.listdir(new_dataset_path):
species_pictures = os.listdir(new_dataset_path + '/' + folder)
for image in species_pictures:
the_image = np.asarray(Image.open(new_dataset_path + '/' + folder + '/' + image))
for i in range(7):
pre_image = the_image.reshape((size,size))
shifted_image = shift(pre_image, [i, 0])
shifted_image = Image.fromarray(shifted_image)
shifted_image.save('../dataset/augmented_spectrograms/' + folder + '/' + 'shifted_down' + str(i) + '-' + image)
shifted_image.close()
#To shift to the left by up to 6 pixels
for folder in os.listdir(new_dataset_path):
species_pictures = os.listdir(new_dataset_path + '/' + folder)
for image in species_pictures:
the_image = np.asarray(Image.open(new_dataset_path + '/' + folder + '/' + image))
for i in range(7):
pre_image = the_image.reshape((size,size))
shifted_image = shift(pre_image, [0, (i*(-1))])
shifted_image = Image.fromarray(shifted_image)
shifted_image.save('../dataset/augmented_spectrograms/' + folder + '/' + 'shifted_left' + str(i) + '-' + image)
shifted_image.close()
#To shift to the right by up to 6 pixels
for folder in os.listdir(new_dataset_path):
species_pictures = os.listdir(new_dataset_path + '/' + folder)
for image in species_pictures:
the_image = np.asarray(Image.open(new_dataset_path + '/' + folder + '/' + image))
for i in range(7):
pre_image = the_image.reshape((size,size))
shifted_image = shift(pre_image, [0, i])
shifted_image = Image.fromarray(shifted_image)
shifted_image.save('../dataset/augmented_spectrograms/' + folder + '/' + 'shifted_right' + str(i) + '-' + image)
shifted_image.close()
new_dataset_path = '../dataset/augmented_spectrograms'
# Function for displaying a random photo from each class in a dataset
def displaySamples(dataset_folders):
# go through each class in the dataset
dataset = os.listdir(dataset_folders)
for folder in dataset:
imgs_path = dataset_folders + '/' + folder
imgs = os.listdir(imgs_path) # list all images in a class
sample = dataset_folders + '/' + folder + '/' + imgs[np.random.randint(len(imgs))] # path for a random image from a dataset class
name = sample.split('/')[-2]
print(name, 'sample :')
disp(Im(sample)) # display our sample
print("========================================")
print("Here's a random sample from each class in the training dataset:")
displaySamples(new_dataset_path)
def getDatasetFolders(dataset_path):
folders = os.listdir(dataset_path)
dataset_folders = []
for folder in folders:
dataset_folders.append(dataset_path + '/' + folder)
return dataset_folders
dataset_folders = getDatasetFolders(new_dataset_path)
image_size = 224 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_image(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
#print(image_data.shape)
            # our images were converted to grayscale, so we expect 2-D arrays of shape (image_size, image_size)
# see: https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.imread.html
if (image_data.shape != (image_size, image_size)):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
#if num_images < min_num_images:
# raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
###Output
_____no_output_____
###Markdown
Pickling Data We want to pickle the data by species, allowing for control of the minimum images per class. Beware that this will drastically influence the performance of your model.
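Once written, a per-species pickle can be loaded back as a single NumPy array of images, e.g. (the file name is taken from the pickle listing printed below):

```python
# Load one species' images back from its pickle and check the tensor shape
with open('../dataset/pickle_data/Eleutherodactylus coqui.pickle', 'rb') as f:
    coqui_images = pickle.load(f)
print(coqui_images.shape)  # expected: (num_images, 224, 224)
```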
###Code
def maybe_pickle(data_folders, min_num_images_per_class, pickles_path, force=False):
if not os.path.exists(pickles_path):
os.makedirs(pickles_path)
else:
shutil.rmtree(pickles_path)
os.makedirs(pickles_path)
dataset_names = []
for folder in data_folders:
class_name = folder.split('/')[-1] # species name
set_filename = pickles_path + '/' + class_name + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
image_files = os.listdir(folder)
count = 0
for image in image_files:
count +=1
if count >= min_num_images_per_class:
print('Pickling %s.' % set_filename)
dataset = load_image(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
min_imgs_per_class = 32
pickles_path = '../dataset/pickle_data'
datasets = maybe_pickle(dataset_folders, min_imgs_per_class, pickles_path)
pickles = getDatasetFolders('../dataset/pickle_data')
print(datasets)
length = len(pickles)
print(f'We have {length} pickles')
###Output
['../dataset/pickle_data/Microcerculus marginatus.pickle', '../dataset/pickle_data/Eleutherodactylus cooki.pickle', '../dataset/pickle_data/Eleutherodactylus cochranae.pickle', '../dataset/pickle_data/Hypocnemis subflava.pickle', '../dataset/pickle_data/Formicarius analis.pickle', '../dataset/pickle_data/Basileuterus bivittatus.pickle', '../dataset/pickle_data/Megascops nudipes.pickle', '../dataset/pickle_data/Myrmoborus leucophrys.pickle', '../dataset/pickle_data/Thamnophilus schistaceus.pickle', '../dataset/pickle_data/Eleutherodactylus brittoni.pickle', '../dataset/pickle_data/Liosceles thoracicus.pickle', '../dataset/pickle_data/Eleutherodactylus juanariveroi.pickle', '../dataset/pickle_data/Saltator grossus.pickle', '../dataset/pickle_data/Epinephelus guttatus.pickle', '../dataset/pickle_data/Chlorothraupis carmioli.pickle', '../dataset/pickle_data/Myrmeciza hemimelaena.pickle', '../dataset/pickle_data/Unknown Insect.pickle', '../dataset/pickle_data/Percnostola lophotes.pickle', '../dataset/pickle_data/Basileuterus chrysogaster.pickle', '../dataset/pickle_data/Megascops guatemalae.pickle', '../dataset/pickle_data/Eleutherodactylus coqui.pickle']
We have 21 pickles
###Markdown
Classes We have to evaluate the number of classes and how they are distributed. Also, observe which species have higher frequencies than others.
###Code
# Calculates the total of images per class
def class_is_balanced(pickles):
total = 0
for pckle in pickles:
if (os.path.isfile(pckle)):
pickle_class = pickle.load(open(pckle, "rb"))
else:
print("Error reading dataset %s. Exiting.", pickle_path)
return -1
class_name = pckle.split('/')[-1].split('.')[0]
print("The total number of images in class %s is: %d" % (class_name, len(pickle_class)))
total += len(pickle_class)
print("For the dataset to be balanced, each class should have approximately %d images.\n" % (total / len(pickles)))
return (total // len(pickles))
print("Let's see if the dataset is balanced:")
balance_num = class_is_balanced(pickles)
def getBalancedClasses(pickle_files, balanced_num):
pickle_paths = []
total = 0
for pckle in pickle_files:
if (os.path.isfile(pckle)):
pickle_class = pickle.load(open(pckle, "rb"))
else:
print("Error reading dataset %s. Exiting.", pickle_path)
return -1
if (len(pickle_class) >= balance_num):
total += len(pickle_class)
pickle_paths.append(pckle)
return pickle_paths, total
true_pickles, total_balanced = getBalancedClasses(pickles, balance_num)
print("balanced dataset is ", true_pickles)
print("Total images are ", total_balanced)
def getLargestClass(pickles):
num_images = 0
class_info = []
for index,pckle in enumerate(pickles):
if (os.path.isfile(pckle)):
pickle_class = pickle.load(open(pckle, "rb"))
else:
print("Error reading dataset %s. Exiting.", pickle_path)
return -1
class_name = pckle.split('/')[-1].split('.')[0]
if(len(pickle_class) > num_images):
num_images = len(pickle_class)
class_info = [index, class_name, num_images]
print("Largest dataset is {} with {} images".format(class_info[1], class_info[2]))
return class_info
class_info = getLargestClass(true_pickles)
def findMinClass(class_folders):
    # find the species folder that contains the fewest images
    minm = float('inf')
    species = ''
    for folder in class_folders:
        images = os.listdir(folder)
        count = len(images)
        if (count < minm):
            minm = count
            species = folder.split('/')[-1]
    return (species, minm)
min_class = findMinClass(dataset_folders)
print(min_class)
# go through our pickles, load them, shuffle them, and choose class_size amount of the images
def makeSubClasses(class_size, pickle_path, pickle_files):
# create path for folder of pickles of subsets of classes
if not os.path.exists(pickle_path):
os.makedirs(pickle_path)
else:
shutil.rmtree(pickle_path)
os.makedirs(pickle_path)
# list of pickles of subsets of classes
subclasses = []
for pickle_file in pickle_files:
try:
with open(pickle_file, 'rb') as f:
name = pickle_path + pickle_file.split('/')[-1].split('.')[0] + '_subset.pickle'
species_set = pickle.load(f) # set of images from species
                # let's shuffle the images to have a random subset
np.random.shuffle(species_set)
species_set = species_set[:class_size,:,:]
try:
with open(name, 'wb') as f:
pickle.dump(species_set, f, pickle.HIGHEST_PROTOCOL)
subclasses.append(name)
except Exception as e:
print('Unable to save data to', name, ':', e)
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
pass
return subclasses
pickle_subclasses = makeSubClasses(56, '../dataset/subclasess_pickle_data/', pickles)
###Output
_____no_output_____
###Markdown
Training, Testing, and Validation Separation As with every implementation of Supervised Learning, we separate the dataset into three components. The training, the testing, and the validation dataset.
###Code
# our dataset is now balanced. calculate our training, validation, and test dataset sizes
total_images = len(pickle_subclasses) * 56
print("We have a total of {}.".format(total_images))
print("We'll split them 60/20/20 for training, validation, and testing respectively.")
print("Training dataset size: {}".format(int(total_images*0.6)))
print("Validation dataset size: {}".format(int(total_images*0.2)))
print("Testing dataset size: {}".format(int(total_images*0.2)))
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets_all(pickle_files, train_size, valid_size, test_size): # valid_size is 0 if not given as argument.
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
test_dataset, test_labels = make_arrays(test_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
tesize_per_class = test_size // num_classes
start_v, start_t, start_te = 0, 0, 0
end_v, end_t, end_te= vsize_per_class, tsize_per_class, tesize_per_class
end_l = vsize_per_class + tsize_per_class
end_tst = end_l + tesize_per_class
for label, pickle_file in enumerate(pickle_files):
#print(start_v, end_v)
#print(start_t, end_v)
name = (pickle_file.split('/')[-1]).split('.')[0]
try:
with open(pickle_file, 'rb') as f:
species_set = pickle.load(f) # set of images from species
                # let's shuffle the images to have random validation and training sets
np.random.shuffle(species_set) # shuffle the data (the "images") in the pickle around
print("Valid dataset with", name, ". Has ", len(species_set), " images.")
print("Needs %d images per class" % vsize_per_class)
print("valid_species is species_set[:%d,:,:]" % vsize_per_class)
valid_species = species_set[:vsize_per_class, :, :]
print("valid_dataset[%d:%d,:,:] = valid_species" % (start_v,end_v))
valid_dataset[start_v:end_v, :, :] = valid_species
print("valid_labels[%d:%d] = %d" % (start_v,end_v,label))
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
print("Train dataset with", name, ". Has ", len(species_set), " images")
print("Needs %d images per class" % tsize_per_class)
print("train_species is species_set[%d:%d,:,:]" % (vsize_per_class,end_l))
train_species = species_set[vsize_per_class:end_l, :, :]
print("train_dataset[%d:%d,:,:] = train_species" % (start_t,end_t))
train_dataset[start_t:end_t, :, :] = train_species
print("train_labels[%d:%d] = %d" % (start_t,end_t,label))
train_labels[start_t:end_t] = label # give label to all images in class
start_t += tsize_per_class # offset start of class for next iteration
end_t += tsize_per_class # offset end of class for next iteration
print("Test dataset with", name, ". Has ", len(species_set), " images")
print("Needs %d images per class" % tesize_per_class)
print("test_species is species_set[%d:%d,:,:]" % (end_l, end_te))
test_species = species_set[end_l:end_tst, :, :]
print("test_dataset[%d:%d,:,:] = test_species" % (start_te,end_te))
test_dataset[start_te:end_te, :, :] = test_species
print("test_labels[%d:%d] = %d" % (start_te,end_te,label))
test_labels[start_te:end_te] = label # give label to all images in class
start_te += tesize_per_class # offset start of class for next iteration
end_te += tesize_per_class # offset end of class for next iteration
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
pass
return valid_dataset, valid_labels, train_dataset, train_labels, test_dataset, test_labels
train_size = int(total_images * 0.6)
valid_size = int(total_images * 0.2)
test_size = int(total_images * 0.2)
valid_dataset, valid_labels, train_dataset, train_labels, test_dataset, test_labels = merge_datasets_all(pickle_subclasses, train_size, valid_size, test_size)
# create dataset when dataset is not balanced, but want to use entire dataset
def merge_datasets_forced(pickle_files, train_size, valid_size=0): # valid_size is 0 if not given as argument.
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
#print(start_v, end_v)
#print(start_t, end_v)
name = (pickle_file.split('/')[-1]).split('.')[0]
try:
with open(pickle_file, 'rb') as f:
species_set = pickle.load(f) # set of images from species
if(len(species_set) < (tsize_per_class + vsize_per_class)):
# since our dataset is not balanced we need to make sure
                    # we're not taking more images than we have or dimensions will not match
# reset our ends to previous state, calculate new images_per_class, and
# calculate new ends
end_v -= vsize_per_class
end_t -= tsize_per_class
tsize_per_class = len(species_set) // 2
vsize_per_class = len(species_set) // 2
end_v += vsize_per_class
end_t += tsize_per_class
end_l = vsize_per_class+tsize_per_class
                    # let's shuffle the images to have random validation and training sets
np.random.shuffle(species_set) # shuffle the data (the "images") in the pickle around
if valid_dataset is not None: # if not testing dataset
print("Valid dataset with", name, ". Has ", len(species_set), " images.")
print("Needs %d images per class" % vsize_per_class)
print("valid_species is species_set[:%d,:,:]" % vsize_per_class)
valid_species = species_set[:vsize_per_class, :, :,:]
print("valid_dataset[%d:%d,:,:] = valid_species" % (start_v,end_v))
valid_dataset[start_v:end_v, :, :,:] = valid_species
print("valid_labels[%d:%d] = %d" % (start_v,end_v,label))
valid_labels[start_v:end_v] = label
# increment our start by how many images we used for this class
start_v += vsize_per_class
# assume next class will have the required images_per_class
end_v += (valid_size // num_classes)
# can't reset vsize_per_class here since
                        # the training dataset needs its current state
print("Train dataset with", name, ". Has ", len(species_set), " images")
print("Needs %d images per class" % tsize_per_class)
print("train_species is species_set[%d:%d,:,:]" % (vsize_per_class,end_l))
train_species = species_set[vsize_per_class:end_l, :, :,:]
print("train_dataset[%d:%d,:,:] = train_species" % (start_t,end_t))
train_dataset[start_t:end_t, :, :,:] = train_species
print("train_labels[%d:%d] = %d" % (start_t,end_t,label))
train_labels[start_t:end_t] = label # give label to all images in class
# increment our start by how many images we used for this class
start_t += tsize_per_class
# assume next round will have required images_per_class
tsize_per_class = train_size // num_classes
end_t += tsize_per_class # offset end of class for next iteration
vsize_per_class = valid_size // num_classes
end_l = vsize_per_class+tsize_per_class
else: # we have enough images in this class to use our desired imgs_per_class
tsize_per_class = train_size // num_classes
vsize_per_class = valid_size // num_classes
end_l = vsize_per_class+tsize_per_class
                    # let's shuffle the images to have random validation and training sets
np.random.shuffle(species_set) # shuffle the data (the "images") in the pickle around
if valid_dataset is not None: # if not testing dataset
print("Valid dataset with", name, ". Has ", len(species_set), " images.")
print("Needs %d images per class" % vsize_per_class)
print("valid_species is species_set[:%d,:,:]" % vsize_per_class)
valid_species = species_set[:vsize_per_class, :, :,:]
print("valid_dataset[%d:%d,:,:] = valid_species" % (start_v,end_v))
valid_dataset[start_v:end_v, :, :,:] = valid_species
print("valid_labels[%d:%d] = %d" % (start_v,end_v,label))
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
print("Train dataset with", name, ". Has ", len(species_set), " images")
print("Needs %d images per class" % tsize_per_class)
print("train_species is species_set[%d:%d,:,:]" % (vsize_per_class,end_l))
train_species = species_set[vsize_per_class:end_l, :, :,:]
print("train_dataset[%d:%d,:,:] = train_species" % (start_t,end_t))
train_dataset[start_t:end_t, :, :,:] = train_species
print("train_labels[%d:%d] = %d" % (start_t,end_t,label))
train_labels[start_t:end_t] = label # give label to all images in class
start_t += tsize_per_class # offset start of class for next iteration
end_t += tsize_per_class # offset end of class for next iteration
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
print()
return valid_dataset, valid_labels, train_dataset, train_labels
def genLabelMap(pickle_files):
label_map = {}
for label, pickle_file in enumerate(pickle_files):
name = (pickle_file.split('/')[-1]).split('.')[0]
label_map[label] = name
return label_map
def sampleCheck(dataset, labels, label_map):
i = random.randint(1, 5)
for p_i, img in enumerate(random.sample(range(len(labels)), 5*i)):
plt.subplot(i, 5, p_i+1)
plt.axis('off')
label = labels[img]
species = label_map[label]
#print(species)
title = species + ' sample:'
plt.title(title)
plt.imshow(dataset[img])
plt.show()
label_map = genLabelMap(pickle_subclasses)
sampleCheck(train_dataset, train_labels,label_map)
###Output
_____no_output_____
###Markdown
Output Data We output the data in a pickle format, to be used next on the models.
###Code
train_labels
pickle_file = '../dataset/arbimon_VGG.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) # save all out datasets in one pickle
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
###Output
_____no_output_____ |
Chapter4_Making_Predictions.ipynb | ###Markdown
###Code
import torch
w = torch.tensor(3.0, requires_grad=True)
b = torch.tensor(1.0, requires_grad=True)
def forward(x):
y = w*x + b
return y
x = torch.tensor([[4],[7]])
forward(x)
###Output
_____no_output_____ |
notebooks/M2-Exercise-01-cross_validation.ipynb | ###Markdown
📝 Exercise M2.01 The aim of this exercise is to make the following experiments: * train and test a support vector machine classifier through cross-validation; * study the effect of the parameter gamma of this classifier using a validation curve; * study whether it would be useful, in terms of classification, to add new samples to the dataset, using a learning curve. To make these experiments we will first load the blood transfusion dataset. Note: If you want a deeper overview regarding this dataset, you can refer to the Appendix - Datasets description section at the end of this MOOC.
###Code
import pandas as pd
blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")
data = blood_transfusion.drop(columns="Class")
target = blood_transfusion["Class"]
###Output
_____no_output_____
###Markdown
We will use a support vector machine classifier (SVM). In its most simple form, an SVM classifier is a linear classifier behaving similarly to a logistic regression. Indeed, the optimization used to find the optimal weights of the linear model is different, but we don't need to know these details for the exercise. Also, this classifier can become more flexible/expressive by using a so-called kernel, which makes the model non-linear. Again, no mathematics is required to accomplish this exercise. We will use an RBF kernel, where a parameter `gamma` allows us to tune the flexibility of the model. First let's create a predictive pipeline made of: * a [`sklearn.preprocessing.StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) with default parameters; * a [`sklearn.svm.SVC`](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) where the parameter `kernel` could be set to `"rbf"`. Note that this is the default.
###Code
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import ShuffleSplit, cross_validate
from sklearn.svm import SVC
model = make_pipeline(StandardScaler(), SVC())
###Output
_____no_output_____
###Markdown
Evaluate the statistical performance of your model by cross-validation with a `ShuffleSplit` scheme. Thus, you can use [`sklearn.model_selection.cross_validate`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html) and pass a [`sklearn.model_selection.ShuffleSplit`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html) to the `cv` parameter. Only fix `random_state=0` in the `ShuffleSplit` and leave the other parameters at their defaults.
###Code
cv = ShuffleSplit(random_state=0)
cv_results = cross_validate(model, data, target, cv=cv, n_jobs=2)
cv_results = pd.DataFrame(cv_results)
cv_results
###Output
_____no_output_____
###Markdown
As previously mentioned, the parameter `gamma` is one of the parameters controlling under/over-fitting in a support vector machine with an RBF kernel. Compute the validation curve (using [`sklearn.model_selection.validation_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html)) to evaluate the effect of the parameter `gamma`. You can vary its value between `10e-3` and `10e2` by generating samples on a logarithmic scale. Thus, you can use `np.logspace(-3, 2, num=30)`. Since we are manipulating a `Pipeline`, the parameter name will be `svc__gamma` instead of only `gamma`. You can retrieve the parameter name using `model.get_params().keys()`. We will go into more detail regarding accessing and setting hyperparameters in the next section.
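As a quick optional check (a small illustrative snippet, not part of the original exercise), you can list the pipeline's hyperparameter names to confirm that the kernel coefficient is indeed exposed as `svc__gamma`:
###Code
# optional sanity check: inspect the hyperparameter names exposed by the pipeline
for param_name in model.get_params().keys():
    print(param_name)
###Output
_____no_output_____
###Markdown
Now generate the range of `gamma` values on a logarithmic scale: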
###Code
import numpy as np
vary_gamma = np.logspace(-3,2,num=30)
vary_gamma
###Output
_____no_output_____
###Markdown
Plot the validation curve for the train and test scores.
###Code
from sklearn.model_selection import learning_curve, validation_curve
train_scores, test_scores = validation_curve(
model, data, target, param_name='svc__gamma', param_range=vary_gamma, cv=cv,
n_jobs=2)
import matplotlib.pyplot as plt
plt.errorbar(vary_gamma, train_scores.mean(axis=1),
yerr=train_scores.std(axis=1), label='Training Score')
plt.errorbar(vary_gamma, test_scores.mean(axis=1),
yerr=test_scores.std(axis=1), label = 'Testing score')
plt.legend()
plt.xscale('log')
plt.xlabel(r'Value of the hyperparameter $\gamma$')
plt.ylabel('Accuracy score')
_ = plt.title('Validation curve for SVC')
###Output
_____no_output_____
###Markdown
Now, you can perform an analysis to check whether adding new samples to the dataset could help our model to better generalize. Compute the learning curve (using [`sklearn.model_selection.learning_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html)) by computing the train and test scores for different training dataset sizes. Plot the train and test scores with respect to the number of samples.
###Code
from sklearn.model_selection import learning_curve
train_sizes = np.linspace(0.1, 1, num=10)
results = learning_curve(
model, data, target, train_sizes=train_sizes, cv=cv, n_jobs=2)
train_size, train_scores, test_scores = results[:3]
plt.errorbar(train_size, train_scores.mean(axis=1),
yerr=train_scores.std(axis=1), label='Training score')
plt.errorbar(train_size, test_scores.mean(axis=1),
yerr=test_scores.std(axis=1), label='Testing score')
plt.legend()
plt.xlabel("Number of samples in the training set")
plt.ylabel("Accuracy")
_ = plt.title("Learning curve for support vector machine")
###Output
_____no_output_____ |
doc/_build/Notebooks/2.Preprocess/2.1Merge.ipynb | ###Markdown
Preprocess Merge In this notebook, we show how all seasonal forecasts are loaded into one xarray dataset. For the Siberian heatwave, we retrieved 105 files: one for each of the 35 years and for each of the three lead times ([see Retrieve](../1.Download/1.Retrieve.ipynb)). For the UK, we are able to use more forecasts, because the target month is shorter: one month as compared to three months for the Siberian example. We retrieved 5 lead times x 35 years = 175 files. Each netcdf file contains 25 ensemble members, hence has the dimensions lat, lon, number (25 ensembles). Here we create an xarray dataset that also contains the dimensions time (35 years) and leadtime (5 initialization months). To generate this, we loop over lead times, open all 35 years for each lead time, and then concatenate the lead times.
###Code
##This is so variables get printed within jupyter
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import os
import sys
sys.path.insert(0, os.path.abspath('../../../'))
import src.cdsretrieve as retrieve
os.chdir(os.path.abspath('../../../'))
os.getcwd() #print the working directory
import xarray as xr
import numpy as np
def merge_SEAS5(folder, target_months):
init_months, leadtimes = retrieve._get_init_months(target_months)
print('Lead time: ' + "%.2i" % init_months[0])
SEAS5_ld1 = xr.open_mfdataset(
folder + '*' + "%.2i" % init_months[0] + '.nc',
combine='by_coords') # Load the first lead time
SEAS5 = SEAS5_ld1 # Create the xarray dataset to concatenate over
for init_month in init_months[1:len(init_months)]: ## Remove the first that we already have
print(init_month)
SEAS5_ld = xr.open_mfdataset(
folder + '*' + "%.2i" % init_month + '.nc',
combine='by_coords')
SEAS5 = xr.concat([SEAS5, SEAS5_ld], dim='leadtime')
SEAS5 = SEAS5.assign_coords(leadtime = np.arange(len(init_months)) + 2) # assign leadtime coordinates
return(SEAS5)
SEAS5_Siberia = merge_SEAS5(folder='../Siberia_example/SEAS5/',
target_months=[3, 4, 5])
SEAS5_Siberia
###Output
_____no_output_____
###Markdown
You can for example select the lat, lon, time, ensemble member and lead time as follows (add `.load()` to see the values):
###Code
SEAS5_Siberia.sel(latitude=60,
longitude=-10,
time='2000-03',
number=24,
leadtime=3).load()
SEAS5_UK = merge_SEAS5(folder = '../UK_example/SEAS5/', target_months = [2])
###Output
Lead time: 01
12
11
10
9
###Markdown
The SEAS5 total precipitation rate is in m/s. You can easily convert this and change the attributes. Click on the show/hide attributes button to see the assigned attributes.
###Code
SEAS5_UK['tprate'] = SEAS5_UK['tprate'] * 1000 * 3600 * 24 ## From m/s to mm/d
SEAS5_UK['tprate'].attrs = {'long_name': 'rainfall',
'units': 'mm/day',
'standard_name': 'thickness_of_rainfall_amount'}
SEAS5_UK
###Output
_____no_output_____ |
Interest rate modelling ( Ornstein-Uhlenbeck Process and Vasicek Model ).ipynb | ###Markdown
Ornstein-Uhlenbeck Process
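For reference (standard textbook form, not something defined elsewhere in this notebook), the Ornstein-Uhlenbeck process solves the SDE $dx_t = \theta(\mu - x_t)\,dt + \sigma\,dW_t$, with mean-reversion speed $\theta$, long-run mean $\mu$ and volatility $\sigma$. The simulation below uses the Euler-Maruyama discretization $x_{t+\Delta t} = x_t + \theta(\mu - x_t)\,\Delta t + \sigma\sqrt{\Delta t}\,Z_t$, where $Z_t \sim \mathcal{N}(0,1)$.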
###Code
import numpy as np
from numpy.random import normal
import matplotlib.pyplot as plt
def generate_process(dt = 0.1,theta = 1.2, mu=0.8, sigma = 0.5, n = 10000):
#x(t=0) = 0 and initialize x(t) with zeros
x = np.zeros(n)
for t in range(1,n):
x[t] = x[t-1] + theta*(mu - x[t-1])*dt + sigma*normal(0,np.sqrt(dt))
return x
def plot_process(x):
plt.plot(x)
plt.xlabel('t')
plt.ylabel('x(t)')
plt.title('Ornstein-Uhlenbeck Process')
plt.show()
if __name__ == '__main__':
data = generate_process()
plot_process(data)
###Output
_____no_output_____
###Markdown
Vasicek Model ( Using O-U process for mean reversion)
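The Vasicek model applies the same Ornstein-Uhlenbeck dynamics to the short rate: $dr_t = \kappa(\theta - r_t)\,dt + \sigma\,dW_t$, where $\kappa$ is the speed of mean reversion, $\theta$ the long-run mean rate and $\sigma$ the rate volatility (this is the standard formulation, matching the parameters used in the code below).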
###Code
def vasicek_model(r0, kappa, theta, sigma, T=1, N = 1000): #kappa is speed of mean reversion of interest rate
dt = T/float(N) #theta is the mean interest rate to which the model converges
t = np.linspace(0,T,N+1) #sigma shows the volatility of the interest rate values
rates = [r0]
for _ in range(N):
dr = kappa*(theta-rates[-1])*dt + sigma*normal()*np.sqrt(dt)
rates.append(rates[-1]+dr)
return t,rates
def plot_models(t,r):
plt.plot(t,r)
plt.xlabel('t')
plt.ylabel('Interest Rate r(t)')
plt.title('Vasicek Model')
plt.show()
if __name__=='__main__':
t,r = vasicek_model(0.9,6,1,0.09)
plot_models(t,r)
###Output
_____no_output_____
###Markdown
Bond Pricing with Vasicek Model-2
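For reference, the quantity estimated below is the price of a zero-coupon bond with face value $x$ under the simulated short-rate paths: $P(0,T) = x\,\mathbb{E}\left[e^{-\int_0^T r(t)\,dt}\right] \approx \frac{x}{M}\sum_{i=1}^{M}\exp\Big(-\sum_{j} r^{(i)}_j\,\Delta t\Big)$, where $M$ is the number of simulated paths (`NUM_OF_SIMULATIONS` in the code) and the inner sum is the Riemann approximation of the rate integral along path $i$ (the symbols $M$ and $r^{(i)}_j$ are our own shorthand).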
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#we will simulate 1000's of r(t) interest rate processes
NUM_OF_SIMULATIONS = 1000
#these are the number of points in a single r(t) process
NUM_OF_POINTS = 200
def monte_carlo_simulation(x,r0, kappa, theta, sigma, T=1.): #T is time to maturity
dt = T/float(NUM_OF_POINTS)
result = []
for _ in range(NUM_OF_SIMULATIONS):
rates = [r0]
for _ in range(NUM_OF_POINTS):
dr = kappa*(theta-rates[-1])*dt + sigma*normal()*np.sqrt(dt)
rates.append(rates[-1]+dr)
result.append(rates)
simulation_dataframe = pd.DataFrame(result)
simulation_dataframe = simulation_dataframe.T
integral_sum = simulation_dataframe.sum()*dt
#present value of future cash flow
present_integral_sum = np.exp(-integral_sum)
#mean because the integral is the average
bond_price = x*np.mean(present_integral_sum)
print('Bond Price based on monte carlo simulation is $%.2f' %bond_price)
#plt.plot(simulation_dataframe.T)
#plt.show()
if __name__ == '__main__':
monte_carlo_simulation(1000,0.6,0.3,0.1,0.03)
###Output
Bond Price based on monte carlo simulation is $586.35
|
5. Logistic_Regression_Algorithm/Logistic_regression_(Titanic_dataset).ipynb | ###Markdown
###Code
from google.colab import files
uploaded = files.upload()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.shape
test.shape
train.dtypes
train.head()
train.info()
train.describe()
test[test['Ticket']=='3701']
train.Ticket.value_counts()[:10]
train.groupby(['Pclass', 'Survived'])['Survived'].count()
train.isna().sum()
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
sns.countplot(data=train,x='Survived',hue='Pclass')
sns.distplot(train['Age'].dropna(),bins=40)
sns.countplot(data=train,x='SibSp')
train['Fare'].hist(bins=40,figsize=(12,6))
train.corr()
sns.heatmap(train.corr(),annot=True)
plt.figure(figsize=(10,8))
sns.boxplot(x='Pclass', y='Age',data=train)
train.groupby('Pclass').mean()['Age'].round()
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 38
elif Pclass ==2:
return 30
else:
return 25
else:
return Age
train['Age'] = train[['Age','Pclass']].apply(impute_age,axis=1)
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
del train['Cabin']
train.dropna(inplace=True)
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
train = pd.concat([train,sex,embark],axis=1)
train.head()
train['Sex'] = train['Sex'].map({'male' : 0, 'female' : 1})
train.drop(['Name','Embarked','Ticket'],inplace=True,axis=1)
train.drop('PassengerId',axis=1,inplace=True)
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# mean_class1 = train.groupby('Pclass').mean()['Age'].round().loc[1]
# mean_class2 = train.groupby('Pclass').mean()['Age'].round().loc[2]
# mean_class3 = train.groupby('Pclass').mean()['Age'].round().loc[3]
# train.loc[train['Pclass']==1,'Age'] = train.loc[train['Pclass']==1,'Age'].fillna(value=mean_class1)
# train.loc[train['Pclass']==2,'Age'] = train.loc[train['Pclass']==2,'Age'].fillna(value=mean_class2)
# train.loc[train['Pclass']==3,'Age'] = train.loc[train['Pclass']==3,'Age'].fillna(value=mean_class3)
X = train.drop('Survived',axis=1)
y = train['Survived']
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
X_train,X_test,y_train,y_test = train_test_split(X,y, train_size = 0.75)
lg = LogisticRegression(solver='lbfgs', max_iter=1000)
lg.fit(X_train,y_train)
from sklearn.metrics import accuracy_score
iters = [50,100,150,200,300,500]
for num in iters:
    model = LogisticRegression(penalty='l2',max_iter=num,random_state=1,verbose=3)
    model.fit(X_train,y_train)
    preds = model.predict(X_test)
    print("Accuracy for {} iterations is {}".format(num,accuracy_score(y_test,preds,normalize=True)))
predictions = lg.predict(X_test)
print(predictions)
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test,predictions)
###Output
_____no_output_____ |
notebooks/supp_scatter_plots.ipynb | ###Markdown
Scatter Plots---Produce scatter plots for each canonical functional network comparison displayed in Figure 3.
###Code
# Define the limbic network, remove the NaNs:
limbic_network = np.nan_to_num(fc_dk_normalized.loc["Limbic"].values)
# create brain object
brain = Brain.Brain()
brain.add_connectome(data_dir)
brain.reorder_connectome(brain.connectome, brain.distance_matrix)
brain.bi_symmetric_c()
brain.reduce_extreme_dir()
###Output
_____no_output_____
###Markdown
Limbic Network Obtain the complex Laplacian with the optimized parameters; we will use the top 10 eigenmodes:
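In matrix form (our shorthand for what the code below does, not notation taken from the paper): collect the $N=10$ selected eigenmodes as columns of $E \in \mathbb{R}^{86\times 10}$ and let $f \in \mathbb{R}^{86}$ be the canonical network vector; `np.linalg.lstsq` then solves $\hat{\beta} = \arg\min_{\beta}\lVert E\beta - f\rVert_2^2$, and the scatter plot compares the reconstruction $\hat{f} = E\hat{\beta}$ against $f$.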
###Code
N = 10
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Limbic", "alpha"],
k=parameters.loc["Limbic", "wave_number"],
vis=False,
)
print(brain.norm_eigenmodes.shape)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(limbic_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, limbic_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_limbic = pd.DataFrame(np.concatenate((limbic_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['limbic', 'eigenmode'])
markers = {""}
axis_limbic = sns.regplot(data=df_limbic, x="limbic", y="eigenmode")
###Output
(86, 86)
###Markdown
Default Network:
###Code
default_network = np.nan_to_num(fc_dk_normalized.loc["Default"].values)
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Default", "alpha"],
k=parameters.loc["Default", "wave_number"],
vis=False,
)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(default_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, default_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_default = pd.DataFrame(np.concatenate((default_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['default', 'eigenmode'])
markers = {""}
axis_default = sns.regplot(data=df_default, x="default", y="eigenmode")
###Output
_____no_output_____
###Markdown
Visual Network:
###Code
visual_network = np.nan_to_num(fc_dk_normalized.loc["Visual"].values)
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Visual", "alpha"],
k=parameters.loc["Visual", "wave_number"],
vis=False,
)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(visual_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, visual_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_visual = pd.DataFrame(np.concatenate((visual_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['visual', 'eigenmode'])
markers = {""}
axis_visual = sns.regplot(data=df_visual, x="visual", y="eigenmode")
###Output
_____no_output_____
###Markdown
Frontoparietal network:
###Code
fronto_network = np.nan_to_num(fc_dk_normalized.loc["Frontoparietal"].values)
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Frontoparietal", "alpha"],
k=parameters.loc["Frontoparietal", "wave_number"],
vis=False,
)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(fronto_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, fronto_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_fronto = pd.DataFrame(np.concatenate((fronto_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['frontoparietal', 'eigenmode'])
markers = {""}
axis_fronto = sns.regplot(data=df_fronto, x="frontoparietal", y="eigenmode")
###Output
_____no_output_____
###Markdown
Somatomotor Network:
###Code
motor_network = np.nan_to_num(fc_dk_normalized.loc["Somatomotor"].values)
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Somatomotor", "alpha"],
k=parameters.loc["Somatomotor", "wave_number"],
vis=False,
)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(motor_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, motor_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_motor = pd.DataFrame(np.concatenate((motor_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['somatomotor', 'eigenmode'])
markers = {""}
axis_motor = sns.regplot(data=df_motor, x="somatomotor", y="eigenmode")
dorsal_network = np.nan_to_num(fc_dk_normalized.loc["Dorsal_Attention"].values)
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Dorsal_Attention", "alpha"],
k=parameters.loc["Dorsal_Attention", "wave_number"],
vis=False,
)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(dorsal_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, dorsal_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_dorsal = pd.DataFrame(np.concatenate((dorsal_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['Dorsal_Attention', 'eigenmode'])
markers = {""}
axis_dorsal = sns.regplot(data=df_dorsal, x="Dorsal_Attention", y="eigenmode")
ventral_network = np.nan_to_num(fc_dk_normalized.loc["Ventral_Attention"].values)
# complex Laplacian eigenmodes:
brain.decompose_complex_laplacian(
alpha=parameters.loc["Ventral_Attention", "alpha"],
k=parameters.loc["Ventral_Attention", "wave_number"],
vis=False,
)
# compute the spatial correlations:
corrs = np.squeeze(np.zeros([len(brain.norm_eigenmodes), 1]))
for e in np.arange(0, len(corrs)):
corrs[e] = pearsonr(np.squeeze(ventral_network), brain.norm_eigenmodes[:, e])[0]
# sort the eigenmodes:
ordered_eigenmodes = np.argsort(-np.round(corrs, 3))
#
selected_eigs = brain.norm_eigenmodes[:, ordered_eigenmodes[0 : N]]
coef, res, _, _ = np.linalg.lstsq(selected_eigs, ventral_network, rcond=None)
comb = selected_eigs @ coef # dot product
df_ventral = pd.DataFrame(np.concatenate((ventral_network[:, np.newaxis], comb[:, np.newaxis]), axis = 1), columns = ['Ventral_Attention', 'eigenmode'])
markers = {""}
axis_ventral = sns.regplot(data=df_ventral, x="Ventral_Attention", y="eigenmode")
axis_ventral.get_figure()
import matplotlib.pyplot as plt
fig,ax = plt.subplots(1, 7, figsize=(12,5), sharey=True)
sns.regplot(ax=ax[0], data=df_limbic, x="limbic", y="eigenmode")
ax[0].set(xlabel='Limbic', ylabel = 'Complex Laplacian Eigenmode')
sns.regplot(ax=ax[1], data=df_default, x="default", y="eigenmode")
ax[1].set(xlabel='Default', ylabel = '')
sns.regplot(ax=ax[2], data=df_visual, x="visual", y="eigenmode")
ax[2].set(xlabel='Visual', ylabel = '')
sns.regplot(ax=ax[3], data=df_fronto, x="frontoparietal", y="eigenmode")
ax[3].set(xlabel='Frontoparietal', ylabel = '')
sns.regplot(ax=ax[4], data=df_motor, x="somatomotor", y="eigenmode")
ax[4].set(xlabel='Somatomotor', ylabel = '')
sns.regplot(ax=ax[5], data=df_dorsal, x="Dorsal_Attention", y="eigenmode")
ax[5].set(xlabel='Dorsal Attention', ylabel = '')
sns.regplot(ax=ax[6], data=df_ventral, x="Ventral_Attention", y="eigenmode")
ax[6].set(xlabel='Ventral Attention', ylabel = '')
plt.tight_layout()
plt.savefig("../figures/supp/scatter_plot.png", dpi=300, bbox_inches="tight")
###Output
_____no_output_____ |
train_lstm.ipynb | ###Markdown
Train LSTM Model Manually
###Code
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import torch
from torch import nn
from models import HarLSTM, ModelUtils
from pl_data import HarDataModule
from utils import FeatUtils
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
1. Prepare features
###Code
data_dir_path = "./data/har_dataset"
norm_method = "std"
batch_size = 16
data_module = HarDataModule(data_dir_path,
batch_size=batch_size,
normalize=norm_method)
###Output
_____no_output_____
###Markdown
2. Define model structure and parameters
###Code
# Check if GPU is available
use_gpu = torch.cuda.is_available()
if(use_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
# Instantiate the model with hyperparams
input_size = 9
output_size = 6
n_hidden = 128
n_layers = 2
# Training params
epochs = 50
lr = 0.0001
net = HarLSTM(input_size, output_size, n_hidden=n_hidden, n_layers=n_layers)
print("Model information:")
print(net)
# Define loss and optimization functions
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
###Output
_____no_output_____
###Markdown
3. Train the model
###Code
train_loader = data_module.train_dataloader()
val_loader = data_module.val_dataloader()
train_stat_dict = ModelUtils.train_net(net, criterion, optimizer, train_loader, val_loader, batch_size, epochs,
use_gpu=use_gpu, print_every=100, clip=5)
ModelUtils.plot_loss_chart(train_stat_dict)
# Save a model
model_path = f"har_lstm_{batch_size}_ep{epochs}_{norm_method}.pt"
torch.save(net, model_path)
###Output
_____no_output_____
###Markdown
4. Test inference by a loaded model
###Code
loaded_net = torch.load(model_path, map_location="cpu")
test_loader = data_module.test_dataloader()
test_loss, test_labels, preds = ModelUtils.test_net(loaded_net, loaded_net.criterion, test_loader, batch_size, use_gpu=use_gpu)
acc = accuracy_score(test_labels, preds)
prec, recall, f1, _ = precision_recall_fscore_support(test_labels, preds, average="macro")
print(f"accuracy: {acc}")
print(f"precision: {prec}")
print(f"recall: {recall}")
print(f"f1: {f1}")
###Output
_____no_output_____ |
numpy.ipynb | ###Markdown
Introduction to NumPy The NumPy library (module) is used in almost all numerical computing done with Python. NumPy provides high-performance computation for vector, matrix, and higher-dimensional data structures in Python; this is because NumPy is implemented in C and Fortran.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
In the NumPy library, "array" is the term used for vectors, matrices, and higher-dimensional structures. Creating NumPy arrays There are several ways to initialize numpy arrays, such as: - from Python lists or tuples - using functions dedicated to generating numpy arrays, such as arange, linspace, etc. - reading the data from a file. Python list => NumPy array An example of creating a vector or a matrix from a Python list.
###Code
# vector: the input to np.array is a Python list
vektor = np.array([1,2,3,4])
vektor
# matrix: the input to np.array is a nested Python list
matriks = np.array([[1, 2], [3, 4]])
matriks
type(vektor), type(matriks)
vektor.shape
matriks.shape
vektor.size
matriks.size
np.shape(vektor)
np.shape(matriks)
np.size(vektor)
np.size(matriks)
###Output
_____no_output_____
###Markdown
They look the same, right? numpy array vs Python list. So why not use Python lists for computation instead of introducing a new array type with numpy? The reasons: - Python lists are very general: their values can be any object and they are dynamically typed, so they do not support mathematical operations such as matrix multiplication, and implementing those for Python lists would be very inefficient because of the dynamic typing. - NumPy arrays, on the other hand, are statically typed and homogeneous, which makes them memory efficient; the type of each element is fixed when the array is created. Thanks to the static typing, mathematical operations such as matrix multiplication can be implemented very efficiently, since they are compiled in C and Fortran.
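A minimal timing sketch of this point (illustrative only, not from the original notebook; the exact speed-up depends on the machine, the array size and the operation):
###Code
import timeit
import numpy as np

n = 1_000_000
py_list = list(range(n))
np_arr = np.arange(n)

# element-wise doubling: pure-Python loop vs. vectorized NumPy
t_list = timeit.timeit(lambda: [v * 2 for v in py_list], number=10)
t_numpy = timeit.timeit(lambda: np_arr * 2, number=10)
print("list comprehension:", t_list)
print("numpy vectorized  :", t_numpy)
###Output
_____no_output_____
###Markdown
The dtype of a numpy array is fixed when the array is created, which is what makes this kind of vectorized computation possible: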
###Code
matriks.dtype
vektor.dtype
matriks[0,0] ="tes"
###Output
_____no_output_____
###Markdown
How to define the data type of a numpy array
###Code
matrik = np.array([[1, 2], [3, 4]], dtype=complex)
matrik
###Output
_____no_output_____
###Markdown
function => numpy array
###Code
# parameters: (start, stop, step)
x = np.arange(0, 10, 1)
x
x= np.linspace(2, 3, 5)
x
# random numbers in the range [0,1], with 5 rows and 4 columns
a =np.random.rand(5,4)
a
# diagonal matrix
a =np.diag([1,2,3])
a
a = np.zeros((3,3))
a
a = np.ones((3,3))
a
###Output
_____no_output_____
###Markdown
Arrays Initialization
###Code
# array creation can be done with list initialization
a = np.array([1, 2, 3, 4, 5, 6]) # 1-d array (vector)
print(a)
print(a.shape) # array dimensions
a2 = np.array([[1, 2, 2**2], [1, 3, 3**2], [1, 4, 4**2]]) # 2-d array (matrix) is initialized with list of lists
print(a2)
zeros_1d = np.zeros((6,)) # iniialize vector with zeroes
ones_1d = np.ones((6,)) # initialize vector with ones
print(zeros_1d)
print(ones_1d)
zeros_2d = np.zeros((6,3)) # iniialize 2-d array with zeroes
ones_2d = np.ones((6,3)) # initialize 2-d array with ones
print(zeros_2d)
print(ones_2d)
np.arange(30) # integer range vector
np.linspace(0, 20, num=5) # linearly spaced vector
print(np.random.rand(10) ) # (pseudo)random (uniformly distributed) number vector
print(np.random.rand(3, 3) ) # (pseudo)random (uniformly distributed) number 2d-array
print(np.random.randn(6) ) # (pseudo)random (normally distributed) number vector
print(np.random.randn(2, 2) ) # (pseudo)random (normally distributed) number 2d-array
###Output
[-0.01010281 -1.20077807 -0.4702973 0.27223269 0.42161409 0.28567025]
[[ 0.50785083 -1.16982011]
[-1.44057215 -1.23965709]]
###Markdown
Element-wise operations
###Code
# math operations work with arrays
print(a * 2) # scalar multiplication
print(a + 2) # scalar summation
print(a**2) # exponentiation
###Output
[ 2 4 6 8 10 12]
[3 4 5 6 7 8]
[ 1 4 9 16 25 36]
###Markdown
Pairwise operations
###Code
b = 2 + a
print("a = {}".format(a))
print("b = {}".format(b))
print("b + a = {}".format(b + a)) # vector pairwise summation
print("b - a = {}".format(b - a)) # vector pairwise subtraction
print("b * a = {}".format(b * a)) # vector pairwise multiplication
print("(a,b) = ", np.inner(b,a)) # inner product
print("(a,b) = ", sum(b*a)) # inner product (home edition)
###Output
a = [1 2 3 4 5 6]
b = [3 4 5 6 7 8]
b + a = [ 4 6 8 10 12 14]
b - a = [2 2 2 2 2 2]
b * a = [ 3 8 15 24 35 48]
(a,b) = 133
(a,b) = 133
###Markdown
Matrix operations
###Code
# original matrix (6 rows x 3 columns)
print(ones_2d)
print("original matrix dimensions = {}".format(ones_2d.shape) )
# transposed matrix (3 rows x 6 columns)
print(np.transpose(ones_2d))
print("original matrix dimensions = {}".format(np.transpose(ones_2d).shape) )
vandermonde = np.array([[1, 2, 2**2], [1, 3, 3**2], [1, 4, 4**2]]) # Vandermonde matrix
print(vandermonde)
vandermonde_inv = np.linalg.inv(vandermonde) # inverse Vandermonde matrix
print(vandermonde_inv)
print(np.matmul(vandermonde, vandermonde_inv)) # matrix multiplication
print(np.max(vandermonde))
###Output
16
###Markdown
###Code
from google.colab import drive
drive.mount("/content/gdrive")
import numpy as np
from google.colab import files
uploaded = files.upload()
data=np.loadtxt("populations.txt")
data
year,hares,lynxes,carrots=data.T
print(year)
populations = data[:, 1:]
populations
populations.std(axis=0)
np.argmax(populations,axis=1)
a=np.tile(np.arange(0,40,10),(3,1))
a
a=np.arange(4)
a
a=np.array([4,2,5,1])
a.sort()
a
a.argsort()
###Output
_____no_output_____
###Markdown
1. Import the numpy package under the name np
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
2. Print the numpy version and the configuration
###Code
print(np.__version__)
print(np.show_config)
###Output
1.19.5
<function show at 0x7fb7d9d9e680>
###Markdown
3. Create a null vector of size 10
###Code
a=np.zeros(10)
print(a)
###Output
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
###Markdown
4. How to find the memory size of any array
###Code
arr=np.array([1,2,3,4,5])
print(arr.nbytes)
###Output
40
###Markdown
5. How to get the documentation of the numpy add function from the command line?
###Code
print(np.info(np.add))
###Output
None
###Markdown
6. Create a null vector of size 10 but the fifth value which is 1
###Code
a=np.zeros(10)
a[4]=1
print(a)
###Output
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
###Markdown
7. Create a vector with values ranging from 10 to 49
###Code
b=np.arange(10,50)
print(b)
###Output
[10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49]
###Markdown
8. Reverse a vector (first element becomes last)
###Code
b=b[::-1]
print(b)
###Output
[49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26
25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
###Markdown
9. Create a 3x3 matrix with values ranging from 0 to 8
###Code
np.matrix(np.arange(9).reshape((3,3)))
###Output
_____no_output_____
###Markdown
10. Find indices of non-zero elements from [1,2,0,0,4,0]
###Code
np.nonzero([1,2,0,0,4,0])
###Output
_____no_output_____
###Markdown
11. Create a 3x3 identity matrix
###Code
np.identity(3)
###Output
_____no_output_____
###Markdown
12. Create a 3x3x3 array with random values
###Code
np.random.random((3,3,3))
###Output
_____no_output_____
###Markdown
13. Create a 10x10 array with random values and find the minimum and maximum values
###Code
d=np.random.random((10,10))
print(d)
print(d.min())
print(d.max())
###Output
[[0.86256087 0.82257505 0.42385635 0.0959995 0.49332644 0.98687554
0.54667736 0.85443848 0.4148213 0.97824845]
[0.7463088 0.71082905 0.11159725 0.27031679 0.83522219 0.55301061
0.98755046 0.85662517 0.12392008 0.69709329]
[0.08866327 0.68608001 0.12145526 0.26941829 0.55161586 0.25188063
0.281082 0.28925267 0.49835597 0.29757086]
[0.19698071 0.12157515 0.32019363 0.8121472 0.29548218 0.28544225
0.73238705 0.92146534 0.94093449 0.69553179]
[0.93949073 0.83201431 0.56708346 0.54739955 0.44594591 0.55933353
0.26068855 0.25817774 0.8042561 0.22894735]
[0.52669327 0.84915812 0.93650442 0.89653673 0.00245399 0.27197554
0.5165206 0.45614991 0.90921658 0.90884196]
[0.7671988 0.84220901 0.53822008 0.32757061 0.58301174 0.82518365
0.36910882 0.39261215 0.41243249 0.6735715 ]
[0.31507843 0.08791196 0.11177386 0.68643488 0.39640082 0.58687923
0.51489113 0.20311043 0.6721267 0.13946521]
[0.21206942 0.90855719 0.02697234 0.07826872 0.38529841 0.64298445
0.55606702 0.61729122 0.16518652 0.27697677]
[0.13155421 0.32850159 0.10628633 0.43508685 0.26876023 0.60670827
0.32973229 0.54516366 0.25627178 0.36607845]]
0.002453985734082176
0.9875504590517233
###Markdown
14. Create a random vector of size 30 and find the mean value
###Code
e=np.random.random(30)
print(e.size)
print(e)
print(e.mean())
###Output
30
[0.55760169 0.58346544 0.40813489 0.71070357 0.45875809 0.41520285
0.08910801 0.93581533 0.69363646 0.50216689 0.87031563 0.74770414
0.13295915 0.04001612 0.56951911 0.6180316 0.70198016 0.85198425
0.24590386 0.33111125 0.77645724 0.76882223 0.61341714 0.18578252
0.57133326 0.61429009 0.1999935 0.67030671 0.86929055 0.03485312]
0.525622161015376
###Markdown
15. Create a 2d array with 1 on the border and 0 inside
###Code
f=np.ones((3,3))
f[1:-1,1:-1]=0
print(f)
###Output
[[1. 1. 1.]
[1. 0. 1.]
[1. 1. 1.]]
###Markdown
16. How to add a border (filled with 0's) around an existing array?
###Code
g=np.ones((5,5))
print(np.pad(g, pad_width=1, mode='constant',
constant_values=0))
###Output
[[0. 0. 0. 0. 0. 0. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 1. 1. 1. 1. 1. 0.]
[0. 0. 0. 0. 0. 0. 0.]]
###Markdown
17. What is the result of the following expression?0 * np.nannp.nan == np.nannp.inf > np.nannp.nan - np.nannp.nan in set([np.nan])0.3 == 3 * 0.1
###Code
print(0 * np.nan)
print(np.nan == np.nan)
print(np.inf > np.nan)
print(np.nan - np.nan)
print(np.nan in set([np.nan]))
print(0.3 == 3 * 0.1)
###Output
nan
False
False
nan
True
False
###Markdown
18. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal
###Code
h=np.zeros((5,5))
for i in range(4):
h[i+1,i]=i+1
print(h)
###Output
[[0. 0. 0. 0. 0.]
[1. 0. 0. 0. 0.]
[0. 2. 0. 0. 0.]
[0. 0. 3. 0. 0.]
[0. 0. 0. 4. 0.]]
###Markdown
19. Create a 8x8 matrix and fill it with a checkerboard pattern
###Code
i=np.zeros((8,8))
i[0:8:2,0:8:2]=1
i[1:9:2,1:9:2]=1
print(i)
###Output
[[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]]
###Markdown
20.Consider a (6,7,8) shape array, what is the index (x,y,z) of the 100th **element**
###Code
print(np.unravel_index(100,(6,7,8)))
###Output
(1, 5, 4)
###Markdown
21. Create a checkerboard 8x8 matrix using the tile function
###Code
arr=np.array([[1,0],[0,1]])
brr=np.tile(arr,(4,4))
print(brr)
###Output
[[1 0 1 0 1 0 1 0]
[0 1 0 1 0 1 0 1]
[1 0 1 0 1 0 1 0]
[0 1 0 1 0 1 0 1]
[1 0 1 0 1 0 1 0]
[0 1 0 1 0 1 0 1]
[1 0 1 0 1 0 1 0]
[0 1 0 1 0 1 0 1]]
###Markdown
22. Normalize a 5x5 random matrix
###Code
j=np.random.random((5,5))
print(j)
max=j.max()
min=j.min()
j=(j-min)/(max-min)
print(j)
###Output
[[0.03701512 0.57091578 0.53481707 0.47366542 0.87875786]
[0.16509701 0.76040974 0.63726475 0.34800586 0.13754738]
[0.33813019 0.27340579 0.60340911 0.39882968 0.93157307]
[0.10465377 0.15132163 0.73793215 0.81108101 0.32576181]
[0.0215033 0.88063055 0.70302455 0.3816121 0.25976946]]
[[0.01704464 0.60370369 0.56403782 0.49684335 0.94196576]
[0.15778319 0.81192285 0.67660906 0.35876651 0.12751119]
[0.34791497 0.27679469 0.6394079 0.41461258 1. ]
[0.09136714 0.14264656 0.7872241 0.86760129 0.33432438]
[0. 0.9440235 0.74886704 0.39569362 0.26181087]]
###Markdown
23. Create a custom dtype that describes a color as four unsigned bytes (RGBA)
###Code
color = np.dtype([("r", np.ubyte, 1),
("g", np.ubyte, 1),
("b", np.ubyte, 1),
("a", np.ubyte, 1)])
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:4: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
after removing the cwd from sys.path.
###Markdown
24. Multiply a 5x3 matrix by a 3x2 matrix (real matrix product)
###Code
arr1=np.full((5,3),4)
arr2=np.full((3,2),2)
print(np.matmul(arr1,arr2))
###Output
[[24 24]
[24 24]
[24 24]
[24 24]
[24 24]]
###Markdown
25. Given a 1D array, negate all elements which are between 3 and 8, in place.
###Code
arr = np.arange(15)
print(arr)
print("\n")
arr[(arr >= 3) & (arr<= 8)] *= -1
print(arr)
###Output
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
[ 0  1  2 -3 -4 -5 -6 -7 -8  9 10 11 12 13 14]
###Markdown
26. What is the output of the following script?
###Code
print(sum(range(5),-1))
from numpy import *
print(sum(range(5),-1))
###Output
9
10
###Markdown
27.Consider an integer vector Z, which of these expressions are legal?
###Code
Z=3
print(Z**Z)
print(2 << Z >> 2)
print(Z <- Z)
print(1j*Z)
print(Z/1/1)
print(Z<Z>Z)
###Output
27
4
False
3j
3.0
False
###Markdown
28. What are the result of the following expressions?
###Code
print(np.array(0) / np.array(0))
print(np.array(0) // np.array(0))
print(np.array([np.nan]).astype(int).astype(float))
###Output
nan
0
[-9.22337204e+18]
###Markdown
29. How to round away from zero a float array ?
###Code
k=np.array([0.9,1.4,2.5,3.5,4.0,5.0199])
print(np.copysign(np.ceil(np.abs(k)), k))
###Output
[1. 2. 3. 4. 4. 6.]
###Markdown
30. How to find common values between two arrays?
###Code
l=np.arange((7))
m=np.arange((10))
print(np.intersect1d(l,m))
###Output
[0 1 2 3 4 5 6]
###Markdown
31. How to ignore all numpy warnings (not recommended)?
###Code
# Suicide mode on
defaults = np.seterr(all="ignore")
Z = np.ones(1) / 0
# Back to sanity
_ = np.seterr(**defaults)
###Output
_____no_output_____
###Markdown
32. Is the following expressions true?
###Code
print(np.sqrt(-1) == np.emath.sqrt(-1))
#print(np.sqrt(-1))
#print(np.emath.sqrt(-1))
###Output
False
###Markdown
33. How to get the dates of yesterday, today and tomorrow?
###Code
print("TODAY = ", np.datetime64('today', 'D'))
print("YESTERDAY = ", np.datetime64('today', 'D') - np.timedelta64(1, 'D'))
print("TOMORROW = ",np.datetime64('today', 'D') + np.timedelta64(1, 'D'))
###Output
TODAY = 2021-09-16
YESTERDAY = 2021-09-15
TOMORROW = 2021-09-17
###Markdown
NumPy is a Python library written in Python, C and C++. NumPy provides an array object that is up to 50x faster than Python lists. The array object in NumPy is called ndarray. $\color{gold}{\text{import numpy}}$
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{}}$ $\color{cyan}{\text{>create ndarray using list}}$
###Code
arr = np.array([2,3,4,2,4])
arr
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>check type}}$
###Code
type(arr)
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>create ndarray using tuple}}$
###Code
arr = np.array((1, 2, 3, 4, 5))
arr
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>0-D, 1-D, 2-D, 3-D array}}$
###Code
arr = np.array(12) #0-D
arr
arr = np.array([1, 3, 2, 4]) #1-D
arr
arr = np.array([[1, 2, 1], [3, 5, 2]]) #2-D
arr
arr = np.array([[[1, 2, 2], [2, 5, 7]], [[4, 2, 6], [8, 4, 3]]])
arr
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>check array dimension}}$
###Code
arr = np.array([[[1, 2, 2], [2, 5, 7]], [[4, 2, 6], [8, 4, 3]]])
print(arr.ndim)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{}}$ $\color{cyan}{\text{>Access Array Elements}}$
###Code
arr = np.array([1, 2, 3, 4]) #1-D
arr[0]
arr = np.array([[1,2,3,4,5], [6,7,8,9,10]]) #2-D
arr[0,4]
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) #3-D
arr[1, 1, 0]
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>Negative Indexing}}$
###Code
arr = np.array([[1,2,3,4,5], [6,7,8,9,10]])
arr[-1, -5]
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Array Slicing}}$
###Code
arr = np.array([1, 2, 3, 4, 5, 6, 7])
arr[:5]
arr = np.array([1, 2, 3, 4, 5, 6, 7])
arr[2:]
arr = np.array([1, 2, 3, 7, 5, 6, 7])
arr[3:4]
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>2-D slicing}}$
###Code
arr = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
arr[0, 1:4]
arr = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
print(arr[0:2, 2:4])
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>Negative Indexing}}$
###Code
arr = np.array([1, 2, 3, 4, 5, 6, 7])
arr[-4:-2]
###Output
_____no_output_____
###Markdown
$\color{cyan}{\text{>Step}}$
###Code
arr = np.array([1, 2, 3, 4, 5, 6, 7])
arr[0:6:3]
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Data Types}}$
###Code
arr = np.array([4, 2, 3, 4]) #Checking Data Type
arr.dtype
arr = np.array([4, 2, 3, 4,14444444444])
arr.dtype
arr = np.array(['apple', 'banana', 'cherry'])
arr.dtype
arr = np.array([1, 2, 3, 4], dtype='S') #array with data type string
print(arr)
print(arr.dtype)
arr = np.array([1.1, 2.1, 3.1]) #float to integer
arr1 = arr.astype(int)
print(arr1)
print(arr1.dtype)
arr = np.array([5, 1, 0, 1, 0])
arr1 = arr.astype(bool)
print(arr1)
print(arr1.dtype)
arr = np.array([1, 2, 3, 4, 5]) #copy
arr1 = arr.copy()
arr[3] = 9
print(arr)
print(arr1)
arr = np.array([1, 2, 3, 4, 5]) #view
arr1 = arr.view()
arr[3] = 9
print(arr)
print(arr1) #changes are affected
arr = np.array([1, 2, 3, 4, 5]) # ownership check
arr1 = arr.copy()
arr2 = arr.view()
print(arr1.base)
print(arr2.base)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Array Shape}}$
###Code
arr = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
print(arr.shape)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Array Reshaping}}$
###Code
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
arr1 = arr.reshape(2, 6)
print(arr1)
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
arr1 = arr.reshape(2, 3, 2)
print(arr1)
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
arr1 = arr.reshape(2, 3, -1)
print(arr1)
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]]) #flattening
arr1 = arr.reshape(-1)
print(arr1)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Array Iterating}}$
###Code
arr = np.array([1, 2, 3])
for x in arr:
print(x,"",end="")
arr = np.array([[1, 2, 3], [4, 5, 6]])
for x in arr:
print(x,"",end="")
arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
for x in arr:
print(x)
arr = np.array([1, 2, 3])
for x in np.nditer(arr, flags=['buffered'], op_dtypes=['float']):
print(x)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Joining Arrays}}$
###Code
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.concatenate((arr1, arr2))
print(arr)
arr1 = np.array([[1, 2], [3, 4]])
arr2 = np.array([[5, 6], [7, 8]])
arr = np.concatenate((arr1, arr2), axis=1)
print(arr)
arr1 = np.array([1, 2, 3]) #elements one over another
arr2 = np.array([4, 5, 6])
arr = np.stack((arr1, arr2), axis=1)
print(arr)
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.hstack((arr1, arr2))
print(arr)
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.vstack((arr1, arr2))
print(arr)
arr1 = np.array([1, 2, 3])
arr2 = np.array([4, 5, 6])
arr = np.dstack((arr1, arr2))
print(arr)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Splitting Arrays}}$
###Code
arr = np.array([1, 2, 3, 4, 5, 6])
arr1 = np.array_split(arr, 2)
print(arr1)
print()
print(arr1[1])
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15], [16, 17, 18]])
newarr = np.array_split(arr, 3)
print(newarr)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Searching Arrays}}$
###Code
arr = np.array([1, 2, 3, 4, 5, 4, 4])
x = np.where(arr==5)
print(x)
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8]) #even values
x = np.where(arr%2==0)
print(x)
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8]) #odd values
x = np.where(arr%2==1)
print(x)
arr = np.array([1, 4, 6, 8])
x = np.searchsorted(arr, 7)
print(x)
arr = np.array([1, 4, 6, 8])
x = np.searchsorted(arr, 7, side='right' )
print(x)
arr = np.array([1, 3, 5, 7]) #insert multiple values
x = np.searchsorted(arr, [2, 4, 6])
print(x)
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Sorting Arrays}}$
###Code
arr = np.array([5, 4, 2, 1])
print(np.sort(arr))
arr = np.array([[3, 2, 4], [5, 0, 1]])
print(np.sort(arr))
arr = np.array([True, False, True])
print(np.sort(arr))
arr = np.array(['Ab', 'Ac', 'P'])
print(np.sort(arr))
###Output
_____no_output_____
###Markdown
$\color{yellow}{\text{NumPy Filtering Arrays}}$
###Code
arr = np.array([1, 2, 3, 4])
x = [True, False, True, True]
arr1 = arr[x]
print(arr1)
arr = np.array([1, 2, 3, 4])
filter= []
for x in arr:
if x <=3:
filter.append(True)
else:
filter.append(False)
arr1 = arr[filter]
print(filter)
print(arr1)
arr = np.array([1, 2, 3, 4]) #odd numbers
filter= []
for x in arr:
if x%2==1:
filter.append(True)
else:
filter.append(False)
arr1 = arr[filter]
print(filter)
print(arr1)
arr = np.array([11, 12, 22, 33, 44])
filter = arr%11==0
arr1 = arr[filter]
print(filter)
print(arr1)
###Output
_____no_output_____
###Markdown
Numpy Intro Reference:1. https://www.w3schools.com/python/numpy_intro.asp 2. https://numpy.org/ 3. http://www.learningaboutelectronics.com/Articles/How-to-create-an-array-of-random-integers-in-Python-with-numpy.php Getting started
###Code
import numpy as np
arr = np.array([1,2,3,4,5])
print(arr)
print(type(arr))
print(np.__version__)
###Output
[1 2 3 4 5]
<class 'numpy.ndarray'>
1.18.5
###Markdown
Using tuple to create array
###Code
arr = np.array((1,2,3,4,5,6,7))
print(arr)
###Output
[1 2 3 4 5 6 7]
###Markdown
Dimensions
###Code
# 0 dimensional, with single value
arr = np.array(42)
print(arr)
# check number of dimensions
print("# of dimensions : ", arr.ndim)
# 1 dimensional, with one array
arr = np.array([7,14,21,28,35,42])
print(arr)
# check number of dimensions
print("# of dimensions : ", arr.ndim)
# 2 dimensional, an array that has 1D arrays as elements
arr = np.array([
[1,2],
[3,4]
])
print(arr)
# check number of dimensions
print("# of dimensions : ", arr.ndim)
# 3 dimensional, an array that has 2D arrays as its elements
arr = np.array([
[
[1,2],
[3,4]
],
[
[5,6],
[7,9]
]
])
print(arr)
# check number of dimensions
print("# of dimensions : ", arr.ndim)
###Output
[[[1 2]
[3 4]]
[[5 6]
[7 9]]]
# of dimensions : 3
###Markdown
Creating higher dimensional array using numpy
###Code
arr = np.array([1,2,3,4], ndmin=5)
print(arr)
# check number of dimensions
print("# of dimensions : ", arr.ndim)
arr = np.array([
[[1,2,3,4],[5,6,7,8]],
[[11,12,13,14],[15,16,17,18]]
], ndmin=5)
print(arr)
# check number of dimensions
print("# of dimensions : ", arr.ndim)
###Output
[[[[[ 1 2 3 4]
[ 5 6 7 8]]
[[11 12 13 14]
[15 16 17 18]]]]]
# of dimensions : 5
###Markdown
Array indexingIndexing is used to access the elemets in the array
###Code
print('accessing the 1st level')
print('-'*20)
print(arr[0])
print('-'*20)
print('accessing the 2nd level')
print('-'*20)
print(arr[0][0])
print('-'*20)
print('accessing the 3rd level')
print('-'*20)
print(arr[0][0][0])
print('-'*20)
print('accessing the 4th level')
print('-'*20)
print(arr[0][0][1][1])
print('-'*20)
print('accessing the 5th level')
print('-'*20)
print(arr[0][0][1][1][3])
###Output
accessing the 1st level
--------------------
[[[[ 1 2 3 4]
[ 5 6 7 8]]
[[11 12 13 14]
[15 16 17 18]]]]
--------------------
accessing the 2nd level
--------------------
[[[ 1 2 3 4]
[ 5 6 7 8]]
[[11 12 13 14]
[15 16 17 18]]]
--------------------
accessing the 3rd level
--------------------
[[1 2 3 4]
[5 6 7 8]]
--------------------
accessing the 4th level
--------------------
[15 16 17 18]
--------------------
accessing the 5th level
--------------------
18
###Markdown
Array can be accessed by passing an array of indicesarr\[l,m,n,o\] => returns the 4D array element, if its present
###Code
print('accessing the 1st level')
print('-'*20)
print(arr[0])
print('-'*20)
print('accessing the 2nd level')
print('-'*20)
print(arr[0,0])
print('-'*20)
print('accessing the 3rd level')
print('-'*20)
print(arr[0,0,0])
print('-'*20)
print('accessing the 4th level')
print('-'*20)
print(arr[0,0,1,1])
print('-'*20)
print('accessing the 5th level')
print('-'*20)
print(arr[0,0,1,1,3])
###Output
accessing the 1st level
--------------------
[[[[ 1 2 3 4]
[ 5 6 7 8]]
[[11 12 13 14]
[15 16 17 18]]]]
--------------------
accessing the 2nd level
--------------------
[[[ 1 2 3 4]
[ 5 6 7 8]]
[[11 12 13 14]
[15 16 17 18]]]
--------------------
accessing the 3rd level
--------------------
[[1 2 3 4]
[5 6 7 8]]
--------------------
accessing the 4th level
--------------------
[15 16 17 18]
--------------------
accessing the 5th level
--------------------
18
###Markdown
Negative indexing
###Code
print('accessing the 3rd level, last element')
print('-'*20)
print(arr[0,0,-1])
print('-'*20)
print('accessing the 4th level, last element')
print('-'*20)
print(arr[0,0,-1,-1])
print('-'*20)
print('accessing the 5th level, second last element of the first element at 4th level')
print('-'*20)
print(arr[0,0,-1,-2,-2])
print('-'*20)
###Output
accessing the 3rd level, last element
--------------------
[[11 12 13 14]
[15 16 17 18]]
--------------------
accessing the 4th level, last element
--------------------
[15 16 17 18]
--------------------
accessing the 5th level, second last element of the first element at 4th level
--------------------
13
--------------------
###Markdown
Array slicing. Slicing helps in taking elements from one index to another index, e.g. arr\[start:end\], arr\[start:end:step\], arr\[start:end, start:end\] etc.
###Code
# 1D array
arr = np.random.randint(1,100,10)
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("printing elements from 2nd to 4th")
print(arr[1:4])
print("printing elements from 4nd to end")
print(arr[4:])
print("printing elements till 7th")
print(arr[:7])
###Output
[25 7 53 87 28 72 17 93 34 62]
# of dims 1
--------------------
printing elements from 2nd to 4th
[ 7 53 87]
printing elements from the 5th to the end
[28 72 17 93 34 62]
printing elements till 7th
[25 7 53 87 28 72 17]
###Markdown
Negative slicing
###Code
arr = np.random.randint(1,100,10)
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("printing 3rd last elements till 2nd last elemet")
print(arr[-3:-1])
print("-"*20)
print("printing 6th last elements till the last elemet")
print(arr[-6:])
###Output
[73 72 21 10 8 46 24 27 53 77]
# of dims 1
--------------------
printing the 3rd-last element till the 2nd-last element
[27 53]
--------------------
printing from the 6th-last element till the last element
[ 8 46 24 27 53 77]
###Markdown
using step
###Code
arr = np.random.randint(1,100,10)
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("elements at the 2nd index steps")
print(arr[::2])
print("-"*20)
print("elements at the 2nd index steps from 1st till 5th element")
print(arr[:5:2])
print("-"*20)
print("elements at even 2nd indices steps from 1st till 5th element")
print(arr[1:5:2])
print("-"*20)
# 2D array
arr = np.random.randint(1,100, size=(2,5))
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("printing elements from index 0 ")
print(arr[0:])
print("# of dims",arr[0:].ndim)
print("-"*20)
print("printing the first element from 2D array ")
print(arr[0:1])
print("# of dims",arr[0:1].ndim)
print("-"*20)
print("printing the second elements from 2D array ")
print(arr[1:])
print("# of dims",arr[1:].ndim)
print("-"*20)
print("printing the second element of the second element from 2D array ")
print(arr[1:])
print("# of dims",arr[1:,1:2].ndim)
print("-"*20)
# 2D array
arr = np.random.randint(1,100, size=(2,5))
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("printing the 3rd and 4th elements from all rows of 2D array")
print(arr[0:,2:4])
print("# of dims",arr[0:,2:4].ndim)
print("-"*20)
print("printing the 3rd and 4th elements from the second row of 2D array")
print(arr[1:2,2:3])
print("# of dims",arr[1:2,2:3].ndim)
print("-"*20)
# 3D array
print("3 dimentional array, with 2 rows and 3 elements")
arr = np.random.randint(1,10, size=(3,2,3))
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("3 dimentional array, with 5 rows and 7 elements")
arr = np.random.randint(1,100, size=(3,5,7))
print(arr)
print("# of dims",arr.ndim)
print("-"*20)
print("the first item in the 3D array using simple index gives 1D array")
print(arr[0])
print("# of dims",arr[0].ndim)
print("-"*20)
print("the first item in the 3D array, using slice gives 3D array")
print(arr[0:1])
print("# of dims",arr[0:1].ndim)
print("-"*20)
print("the second element in 3D array")
print(arr[1:2])
print("# of dims",arr[0:1].ndim)
print("-"*20)
print("the 3rd and 4th element from the second element in 3D array")
print(arr[1:2,2:4])
print("# of dims",arr[1:2,2:4].ndim)
print("-"*20)
print("the 6th element from the 3rd and 4th element from the second element in 3D array")
print(arr[1:2,2:4,6:7])
print("# of dims",arr[1:2,2:4, 6:7].ndim)
print("-"*20)
print("the 6th element, from the 5th element from the second element in 3D array")
"""
1. 1:2 indexes the 2nd element of 3D array
2. 4:5 indexes the 5th element of the array obtained as a result of step 1
3. 5:6 indexes the 6th element of the array obtained as a result of step 2
"""
print(arr[1:2,4:5,5:6])
print("# of dims",arr[1:2, 4:5, 5:6].ndim)
print("-"*20)
###Output
the 6th element from the 3rd and 4th element from the second element in 3D array
[[[25]
[72]]]
# of dims 3
--------------------
the 6th element from the 5th element from the second element in 3D array
[[[77]]]
# of dims 3
--------------------
###Markdown
Data types. Below is a list of all data types in NumPy and the characters used to represent them: i - integer, b - boolean, u - unsigned integer, f - float, c - complex float, m - timedelta, M - datetime, O - object, S - string, U - unicode string, V - fixed chunk of memory for other type (void)
###Code
arr = np.array([1, 2, 3, 4])
print(arr)
print("datatype is : ",arr.dtype)
print("-"*20)
arr = np.array(["apple", 'banana', 'mango', 'peach'])
print(arr)
print("datatype is : ",arr.dtype)
print("-"*20)
print("with a defined datatype")
arr = np.array([1,2,3,4], dtype="S")
print(arr)
print("datatype is : ",arr.dtype)
print("-"*20)
###Output
[1 2 3 4]
datatype is : int64
--------------------
['apple' 'banana' 'mango' 'peach']
datatype is : <U6
--------------------
with a defined datatype
[b'1' b'2' b'3' b'4']
datatype is : |S1
--------------------
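###Markdown
A small, illustrative sketch (the values here are arbitrary) of a few more of the type characters listed above:
###Code
print(np.array([1, 2, 3], dtype='f').dtype)   # f - float (float32)
print(np.array([True, False, True]).dtype)    # b - boolean (inferred from the values)
print(np.array([1 + 2j, 3 + 0j]).dtype)       # c - complex float (complex128)
print(np.array([1, 2, 3], dtype='u4').dtype)  # u - unsigned integer (4 bytes)
###Output
_____no_output_____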
###Markdown
For i, u, f, S and U we can define size as well.
###Code
arr = np.array([1, 2, 3, 4], dtype='i4')
print(arr)
print(arr.dtype)
print("-"*20)
arr = np.array([1, 2, 3, 4], dtype='i8')
print(arr)
print(arr.dtype)
print("-"*20)
###Output
[1 2 3 4]
int32
--------------------
[1 2 3 4]
int64
--------------------
###Markdown
Converting datatype of existing array
###Code
arr = np.array([1.1, 2.1, 3.1])
print('Original array : ',arr)
print(arr.dtype)
print('-'*20)
newarr = arr.astype('i')
print('New array',newarr)
print(newarr.dtype)
print('-'*20)
arr = np.array([1.1, 2.1, 3.1])
print('Original array : ',arr)
print(arr.dtype)
print('-'*20)
newarr = arr.astype(int)
print('New array',newarr)
print(newarr.dtype)
print('-'*20)
arr = np.array([1.1, 2.1, 0.0])
print('Original array : ',arr)
print(arr.dtype)
print('-'*20)
newarr = arr.astype(bool)
print('New array',newarr)
print(newarr.dtype)
print('-'*20)
arr = np.array([1.1, 2.1, 0.0])
print('Original array : ',arr)
print(arr.dtype)
print('-'*20)
newarr = arr.astype(str)
print('New array',newarr)
print(newarr.dtype)
print('-'*20)
###Output
Original array : [1.1 2.1 0. ]
float64
--------------------
New array ['1.1' '2.1' '0.0']
<U32
--------------------
###Markdown
Copy vs View
###Code
print("changing the element at index 0 of the original array has no effect on copy")
print("-"*20)
arr = np.array([1, 2, 3, 4, 5])
print("arr before changing the element :",arr)
x = arr.copy()
arr[0] = 42
print("arr :",arr)
print("x :",x)
print("changing the element at index 0 of the original array has effect on view")
print("-"*20)
arr = np.array([1, 2, 3, 4, 5])
print("arr before changing the element :",arr)
x = arr.view()
arr[0] = 42
print("arr :",arr)
print("x :",x)
###Output
changing the element at index 0 of the original array has effect on view
--------------------
arr before changing the element : [1 2 3 4 5]
arr : [42 2 3 4 5]
x : [42 2 3 4 5]
###Markdown
Every NumPy array has the attribute `base` that returns None if the array owns the data.
###Code
arr = np.array([1, 2, 3, 4, 5])
x = arr.copy()
y = arr.view()
print("base for copy : ",x.base)
print("base for view : ",y.base , ", => returns the original array")
###Output
base for copy : None
base for view : [1 2 3 4 5] , => returns the original array
###Markdown
Shape
###Code
print("3 dimentional array, with 5 rows and 7 elements")
arr = np.random.randint(1,100, size=(3,5,7))
print(arr)
print("# of dims",arr.ndim)
print("Shape of the array is : ", arr.shape)
print("Size of the array is : ", arr.size)
print("-"*20)
###Output
3 dimensional array, with 5 rows and 7 elements
[[[19 97 83 44 30 2 25]
[12 69 4 95 47 48 30]
[ 5 98 42 50 15 7 63]
[89 68 54 75 98 99 48]
[62 92 96 21 67 12 57]]
[[52 71 58 29 7 33 22]
[87 98 73 3 5 44 62]
[14 4 32 61 43 57 83]
[ 4 54 36 40 55 17 49]
[35 85 85 41 9 47 76]]
[[93 94 47 58 74 54 39]
[85 73 31 20 41 10 72]
[80 84 82 81 94 8 58]
[34 3 64 8 22 7 59]
[92 28 57 30 19 87 31]]]
# of dims 3
Shape of the array is : (3, 5, 7)
Size of the array is : 105
--------------------
###Markdown
Reshape
###Code
print("3 dimentional array, with 4 rows and 2 elements")
arr = np.random.randint(1,100, size=(3,4,2))
print(arr)
print("# of dims",arr.ndim)
print("Shape of the array is : ", arr.shape)
print("Size of the array is : ", arr.size)
print("-"*20)
print("Reshaping array to 4 dimentional array, with 2 rows of 3, 2D arrays")
"""
4th dimension has 2 3D arrays
3rd dimesion has 3 2D arrays
2nd dimension has 2 arrays with 2 elements each
1st dimention has 2 elements
"""
newarr = arr.reshape(2,3,2,2) # should be equal to dimension
print(newarr)
print("# of dims in newarr",newarr.ndim)
print("Shape of the newarr is : ", newarr.shape) # why this is not changing?
print("Size of the newarr is : ", newarr.size)
print("-"*20)
# can only reshape to an array with the same total number of elements
# Unknown dimension
# Note: We cannot pass -1 to more than one dimension.
"""
4th dimension has 6 3D arrays
3rd dimension has 2 2D arrays
2nd dimension has 2 arrays with 1 element each
1st dimension has 1 element
"""
newarr = arr.reshape(-1,2,2,1) # -1 lets NumPy infer this dimension; four values are given because we want a 4D array
print(newarr)
print("# of dims in newarr",newarr.ndim)
print("Shape of the newarr is : ", newarr.shape)
print("Size of the newarr is : ", newarr.size)
print("-"*20)
###Output
[[[[50]
[21]]
[[72]
[80]]]
[[[20]
[79]]
[[12]
[45]]]
[[[46]
[93]]
[[77]
[ 8]]]
[[[50]
[85]]
[[68]
[46]]]
[[[99]
[ 8]]
[[88]
[42]]]
[[[49]
[23]]
[[ 6]
[92]]]]
# of dims in newarr 4
Shape of the newarr is : (6, 2, 2, 1)
Size of the newarr is : 24
--------------------
###Markdown
Flattening the arrays
###Code
flatarr = newarr.reshape(-1)
print(flatarr)
print("# of dims in flatarr",flatarr.ndim)
print("Shape of the flatarr is : ", flatarr.shape)
print("Size of the flatarr is : ", flatarr.size)
print("-"*20)
###Output
[50 21 72 80 20 79 12 45 46 93 77 8 50 85 68 46 99 8 88 42 49 23 6 92]
# of dims in flatarr 1
Shape of the flatarr is : (24,)
Size of the flatarr is : 24
--------------------
###Markdown
**Note:** There are many functions for changing the shape of arrays in numpy, e.g. `flatten` and `ravel`, and also for rearranging the elements, e.g. `rot90`, `flip`, `fliplr`, `flipud` etc. These fall under the intermediate to advanced sections of numpy; a short sketch follows after the next cell.
###Code
print("flatten", newarr.flatten())
print("ravel ", newarr.ravel())
###Output
flatten [50 21 72 80 20 79 12 45 46 93 77 8 50 85 68 46 99 8 88 42 49 23 6 92]
ravel [50 21 72 80 20 79 12 45 46 93 77 8 50 85 68 46 99 8 88 42 49 23 6 92]
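###Markdown
A minimal, illustrative sketch (on a small throwaway array) of the rearranging helpers mentioned above:
###Code
demo = np.arange(6).reshape(2, 3)
print(demo)
print(np.flip(demo))     # reverse the order of elements along every axis
print(np.fliplr(demo))   # reverse left-right (columns)
print(np.flipud(demo))   # reverse up-down (rows)
print(np.rot90(demo))    # rotate the 2D array by 90 degrees counter-clockwise
###Output
_____no_output_____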
###Markdown
Iterating
###Code
# 1D array
arr = np.random.randint(1,100,10)
for i in arr:
print(i)
# 2D array
arr = np.random.randint(1,100, size=(3,5))
for k,v in enumerate(arr):
print("row-"+str(k)+" : ",v)
# 3D array
arr = np.random.randint(1,100, size=(3,5,2))
for i,x in enumerate(arr):
print("row-"+str(i)+" : \n",x)
print("-"*20)
for j,y in enumerate(x):
print("row-"+str(i)+","+str(j)+" : ",y)
print("="*20)
# 4D array
arr = np.random.randint(1,100, size=(2,1,3,4))
for i,x in enumerate(arr):
print("4D row-"+str(i)+" : \n",x)
print("-"*20)
for j,y in enumerate(x):
print("3D row-"+str(i)+","+str(j)+" : \n",y)
for k,z in enumerate(y):
print("2D row-"+str(i)+","+str(j)+","+str(k)+" : ",z)
for l,a in enumerate(z):
print("1D row-"+str(i)+","+str(j)+","+str(k)+","+str(l)+" : ",a)
print("="*20)
###Output
4D row-0 :
[[[69 57 18 87]
[85 86 50 43]
[42 11 14 8]]]
--------------------
3D row-0,0 :
[[69 57 18 87]
[85 86 50 43]
[42 11 14 8]]
2D row-0,0,0 : [69 57 18 87]
1D row-0,0,0,0 : 69
1D row-0,0,0,1 : 57
1D row-0,0,0,2 : 18
1D row-0,0,0,3 : 87
====================
2D row-0,0,1 : [85 86 50 43]
1D row-0,0,1,0 : 85
1D row-0,0,1,1 : 86
1D row-0,0,1,2 : 50
1D row-0,0,1,3 : 43
====================
2D row-0,0,2 : [42 11 14 8]
1D row-0,0,2,0 : 42
1D row-0,0,2,1 : 11
1D row-0,0,2,2 : 14
1D row-0,0,2,3 : 8
====================
4D row-1 :
[[[64 85 2 23]
[14 68 65 39]
[ 2 7 4 33]]]
--------------------
3D row-1,0 :
[[64 85 2 23]
[14 68 65 39]
[ 2 7 4 33]]
2D row-1,0,0 : [64 85 2 23]
1D row-1,0,0,0 : 64
1D row-1,0,0,1 : 85
1D row-1,0,0,2 : 2
1D row-1,0,0,3 : 23
====================
2D row-1,0,1 : [14 68 65 39]
1D row-1,0,1,0 : 14
1D row-1,0,1,1 : 68
1D row-1,0,1,2 : 65
1D row-1,0,1,3 : 39
====================
2D row-1,0,2 : [ 2 7 4 33]
1D row-1,0,2,0 : 2
1D row-1,0,2,1 : 7
1D row-1,0,2,2 : 4
1D row-1,0,2,3 : 33
====================
###Markdown
Iterating Arrays Using `nditer()`. The function nditer() is a helper function that can be used for very basic to very advanced iterations.
###Code
# 3D array
arr = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
# prints each element in the nd array
for x in np.nditer(arr):
print(x)
###Output
1
2
3
4
5
6
7
8
###Markdown
Iterating Arrays With Different Data Types. We can use the `op_dtypes` argument and pass it the expected datatype to change the datatype of elements while iterating. NumPy does not change the data type of the elements in place (where the elements are in the array), so it needs some other space to perform this action; that extra space is called a buffer, and in order to enable it in `nditer()` we pass `flags=['buffered']`.
###Code
# 3D array
arr = np.random.randint(1,100, size=(2,2,2))
for i in np.nditer(arr, op_dtypes=['S'], flags=['buffered']):
print(i)
###Output
b'95'
b'81'
b'50'
b'98'
b'97'
b'27'
b'34'
b'70'
###Markdown
nditer step size
###Code
# 3D array
arr = np.random.randint(1,100, size=(5,3,8))
print(arr)
print('-'*20)
print(arr[1:5:2, ::2,3::4])
"""
1. identify the rows from 3D array, 1:5:2 => every other row starting from 2nd element, (index 1 and 3)
2. get row at index 0 and 2 from array at index 1 and 3 from prev step
3. get every 4th element, starting form index 3 from prev step result
"""
for x in np.nditer(arr[1:5:2, ::2,3::4]):
print(x)
###Output
[[[19 99 4 24 42 89 27 34]
[99 99 29 66 58 94 85 26]
[52 36 9 58 16 90 87 13]]
[[36 8 11 85 25 6 95 96]
[58 92 36 81 87 12 39 43]
[32 57 25 23 99 65 90 33]]
[[69 4 72 6 80 34 17 79]
[76 24 15 52 51 81 39 31]
[69 76 6 16 59 16 54 32]]
[[77 51 97 65 85 79 67 67]
[93 66 82 32 74 30 68 20]
[43 75 22 89 78 28 10 24]]
[[39 91 77 7 11 28 26 16]
[ 7 96 8 89 84 20 76 98]
[13 81 51 45 82 37 20 23]]]
--------------------
[[[85 96]
[23 33]]
[[65 67]
[89 24]]]
85
96
23
33
65
67
89
24
###Markdown
Enumerated Iteration Using `ndenumerate()`
###Code
arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
for idx, x in np.ndenumerate(arr):
print(idx, x)
arr = np.random.randint(1,100,size=(4,5,2))
for idx,x in np.ndenumerate(arr):
print(idx,x)
###Output
(0, 0, 0) 92
(0, 0, 1) 56
(0, 1, 0) 88
(0, 1, 1) 1
(0, 2, 0) 42
(0, 2, 1) 71
(0, 3, 0) 79
(0, 3, 1) 18
(0, 4, 0) 84
(0, 4, 1) 31
(1, 0, 0) 71
(1, 0, 1) 20
(1, 1, 0) 13
(1, 1, 1) 43
(1, 2, 0) 21
(1, 2, 1) 13
(1, 3, 0) 51
(1, 3, 1) 17
(1, 4, 0) 15
(1, 4, 1) 11
(2, 0, 0) 4
(2, 0, 1) 98
(2, 1, 0) 78
(2, 1, 1) 57
(2, 2, 0) 1
(2, 2, 1) 13
(2, 3, 0) 60
(2, 3, 1) 95
(2, 4, 0) 54
(2, 4, 1) 67
(3, 0, 0) 95
(3, 0, 1) 44
(3, 1, 0) 69
(3, 1, 1) 88
(3, 2, 0) 58
(3, 2, 1) 30
(3, 3, 0) 97
(3, 3, 1) 13
(3, 4, 0) 95
(3, 4, 1) 79
###Markdown
Joining NumPy Arrays. Joining means putting the contents of two or more arrays into a single array. In SQL we join tables based on a key, whereas in NumPy we join arrays by axes. axis = 0 => rows, axis = 1 => cols - concatenate - stack - hstack - vstack - dstack
###Code
# 1D concatenation
arr1 = np.random.randint(1,100,size=(10))
arr2 = np.random.randint(1,100,size=(10))
arr = np.concatenate((arr1,arr2))
print(arr1)
print(arr2)
print(arr)
# 2D concatenation
arr1 = np.random.randint(1,100,size=(3,10))
arr2 = np.random.randint(1,100,size=(3,1))# just 1 col but same number of rows
# arr2 = np.random.randint(1,100,size=(4,1))# just 1 col but a different number of rows; this fails for axis=1 (row counts differ), and axis=0 would instead require the same number of columns
# with axis = 1
arr = np.concatenate((arr1,arr2), axis=1)
print(arr1)
print("-"*20)
print(arr2)
print("-"*20)
print(arr)
###Output
[[55 30 12 76 24 74 19 41 59 40]
[ 4 50 56 39 31 84 14 28 8 55]
[71 95 8 62 16 96 97 44 99 61]]
--------------------
[[19]
[91]
[69]]
--------------------
[[55 30 12 76 24 74 19 41 59 40 19]
[ 4 50 56 39 31 84 14 28 8 55 91]
[71 95 8 62 16 96 97 44 99 61 69]]
###Markdown
Stack
###Code
arr1 = np.random.randint(1,50,size=(3,7))
arr2 = np.random.randint(1,50,size=(3,7))
arr = np.stack((arr1,arr2))
print(arr1)
print("-"*20)
print(arr2)
print("-"*20)
print("this puts arr1 on top of arr2 and creates 3D array as arr1 and arr2 are 2D arrays, has 2 rows")
print(arr)
print("# of dims",arr.ndim)
print("shape of dims",arr.shape)
arr1 = np.random.randint(1,50,size=(3,7))
arr2 = np.random.randint(1,50,size=(3,7))
arr = np.stack((arr1,arr2), axis=1)
print(arr1)
print("-"*20)
print(arr2)
print("-"*20)
print("this puts row at index i from arr1 and arr2 on top of each other and creates 3D array as arr1 and arr2 are 2D arrays, has 3 rows now")
print(arr)
print("# of dims",arr.ndim)
print("shape of dims",arr.shape)
# hstack
arr1 = np.random.randint(1,50,size=(3,7))
arr2 = np.random.randint(1,50,size=(3,7))
arr = np.hstack((arr1,arr2))
print(arr1)
print("-"*20)
print(arr2)
print("-"*20)
print("concatinates corresponding row from arr1 and arr2 at index i, changes number of columns ")
print(arr)
print("# of dims",arr.ndim)
print("shape of dims",arr.shape)
# vstack
arr1 = np.random.randint(1,50,size=(3,7))
arr2 = np.random.randint(1,50,size=(3,7))
arr = np.vstack((arr1,arr2))
print(arr1)
print("-"*20)
print(arr2)
print("-"*20)
print("stacks one array on top of another array, changes number of rows")
print(arr)
print("# of dims",arr.ndim)
print("shape of dims",arr.shape)
# dstack
arr1 = np.random.randint(1,50,size=(2,7))
arr2 = np.random.randint(1,50,size=(2,7))
arr3 = np.random.randint(1,50,size=(2,7))
arr = np.dstack((arr1,arr2,arr3))
print(arr1)
print("-"*20)
print(arr2)
print("-"*20)
print("creates rows in 3D array = # of rows in arr1,arr2,arr3, 2 in this case")
print("creates colums = # of arrays, arr1,arr2,arr3 = 3")
print("creates rows in each 2D array = # of cols in arr1,arr2,arr3 = 7")
print(arr)
print("# of dims",arr.ndim)
print("shape of dims",arr.shape)
###Output
[[ 7 46 11 5 27 31 21]
[48 41 44 15 9 11 37]]
--------------------
[[34 36 22 26 7 13 28]
[38 26 31 9 16 10 32]]
--------------------
creates rows in 3D array = # of rows in arr1,arr2,arr3, 2 in this case
creates columns = # of arrays, arr1,arr2,arr3 = 3
creates rows in each 2D array = # of cols in arr1,arr2,arr3 = 7
[[[ 7 34 25]
[46 36 48]
[11 22 47]
[ 5 26 3]
[27 7 33]
[31 13 7]
[21 28 31]]
[[48 38 11]
[41 26 35]
[44 31 32]
[15 9 32]
[ 9 16 26]
[11 10 11]
[37 32 4]]]
# of dims 3
shape of dims (2, 7, 3)
###Markdown
Splitting NumPy Arrays. Splitting is the reverse of joining. We use the `array_split` method because the `split` method does not adjust if the number of elements doesn't divide evenly; a short sketch of the difference is shown at the end of this section.
###Code
# split array into 3 arrays
arr = np.random.randint(1,50,size=(10))
newarr = np.array_split(arr, 3)
print(arr)
print("-"*20)
print()
print(newarr)
# split array into 4 arrays
arr = np.random.randint(1,50,size=(6))
newarr = np.array_split(arr, 4)
print(arr)
print("-"*20)
print(newarr)
# 2D array, split array into 3 arrays, col axis, so rows are adjusted
arr = np.random.randint(1,50,size=(5,3))
newarr = np.array_split(arr, 3)
print(arr)
print("-"*20)
# arrays will be 2D; the number of columns will not change, the number of rows will be adjusted if required
for k,val in np.ndenumerate(newarr):
print(k,"\n",val)
# 2D array, split into 2 arrays along axis=1 (columns), so the column counts are adjusted
arr = np.random.randint(1,50,size=(4,3))
newarr = np.array_split(arr, 2, axis=1)
print(arr)
print("-"*20)
print(newarr)
print("-"*20)
# arrays will be 2D; the number of rows will not change, the number of cols will be adjusted if required
# the first split array has 2 cols
# the second split array has 1 col
# hsplit
arr = np.random.randint(1,50,size=(2,8))
newarr = np.hsplit(arr, 4)
print(arr)
print("-"*20)
print(newarr)
print("-"*20)
# need 4 arrays
# first 2 cols for each row make the first array
# 3rd and 4th cols for each row make the 2nd array
# 5th and 6th cols for each row make the 3rd array
# 7th and 8th cols for each row make the 4th array
###Output
[[35 11 25 37 48 48 22 5]
[ 7 42 15 40 36 17 48 24]]
--------------------
[array([[35, 11],
[ 7, 42]]), array([[25, 37],
[15, 40]]), array([[48, 48],
[36, 17]]), array([[22, 5],
[48, 24]])]
--------------------
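###Markdown
A minimal sketch of the difference mentioned at the start of this section: `np.split` requires an exact division, while `np.array_split` adjusts the sub-array sizes.
###Code
demo = np.arange(7)
print(np.array_split(demo, 3))  # sizes are adjusted to 3, 2, 2
try:
    np.split(demo, 3)           # 7 elements cannot be split evenly into 3 parts
except ValueError as err:
    print("np.split raised:", err)
###Output
_____no_output_____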
###Markdown
**`Note:`** Similar counterparts to `vstack()` and `dstack()` are available as `vsplit()` and `dsplit()`. NumPy Searching Arrays. Searching an array returns the indices of the result. To search an array, use the `where()` method.
###Code
arr = np.array([1, 2, 3, 4, 5, 4, 4])
x = np.where(arr == 4)
print(x)
# every third element till 50
arr = np.arange(1,50,3)
print(arr)
# index for every even number in the above list
x = np.where(arr%2 == 0)
print(x)
# every fifth element till 50
arr = np.arange(1,50,5)
print(arr)
# index for every odd number in the above list
x = np.where(arr%2 == 1)
print(x)
###Output
[ 1 6 11 16 21 26 31 36 41 46]
(array([0, 2, 4, 6, 8]),)
###Markdown
search sorted
###Code
arr = np.arange(2,50,2)
print(arr)
# search single value in sorted order
x = np.searchsorted(arr,14)
print(x)
arr = np.arange(2,50,2)
print(arr)
# search single value in sorted order from right side
x = np.searchsorted(arr, 14, side='right') # side='right' returns the insertion point after the existing 14, so arr[x] below is the next element (16), not 14
print('Index of 14 is : ',x)
print("Element at "+str(x)+" is : ",arr[x])
arr = np.arange(2,50,2)
print(arr)
# search single value in sorted order from right side
x = np.searchsorted(arr, [14,48,40])
print(x)
###Output
[ 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30 32 34 36 38 40 42 44 46 48]
[ 6 23 19]
###Markdown
Sorting Arrays. Putting elements in an ordered sequence.
###Code
# number
arr = np.array([3, 2, 0, 1])
print(arr,"\n", np.sort(arr),'\n----\n')
# String
arr = np.array(['banana', 'cherry', 'apple'])
print(arr, "\n", np.sort(arr),"\n----\n")
# boolean
arr = np.array([True, False, True])
print(arr, "\n", np.sort(arr),"\n----\n")
# 2D array
arr = np.array([[3, 2, 4], [5, 0, 1]])
print(arr, "\n----\n", np.sort(arr))
# reversed sort
arr = np.array([3, 2, 45, 0, 1])
print(arr, "\n----\n", np.sort(arr)[::-1])
print('-'*20)
arr = np.array([[3, 2, 4], [5, 0, 1]])
print(arr, "\n----\n", np.sort(arr)[:,::-1])
###Output
[ 3 2 45 0 1]
----
[45 3 2 1 0]
--------------------
[[3 2 4]
[5 0 1]]
----
[[4 3 2]
[5 1 0]]
###Markdown
Filter arrays. Getting some elements out of an existing array and creating a new array out of them is called filtering. In NumPy, you filter an array using a boolean index list. A boolean index list is a list of booleans corresponding to indexes in the array. If the value at an index is True, that element is contained in the filtered array; if the value at that index is False, that element is excluded from the filtered array.
###Code
arr = np.array([41, 42, 43, 44])
# print item at index 0 and 2
x = [True, False, True, False]
newarr = arr[x]
print(newarr)
# print elements higher than 42
arr = np.array([41, 42, 43, 44])
# create empty filter array
filter_arr = []
for i in arr:
if i >42:
filter_arr.append(True)
else:
filter_arr.append(False)
# pass the filter_array to actual array
newarr = arr[filter_arr]
print(filter_arr)
print(newarr)
# Create a filter array that will return only even elements from the original array
arr = np.arange(1,21)
# create empty filter array
filter_arr = []
for i in arr:
if i %2 == 0:
filter_arr.append(True)
else:
filter_arr.append(False)
# pass the filter_array to actual array
newarr = arr[filter_arr]
print(filter_arr)
print(newarr)
###Output
[False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True, False, True]
[ 2 4 6 8 10 12 14 16 18 20]
###Markdown
Creating Filter Directly From Array
###Code
# Create a filter array that will return only values higher than 42:
arr = np.array([41, 42, 43, 44])
filter_arr = arr > 42
newarr = arr[filter_arr]
print(filter_arr)
print(newarr)
# Create a filter array that will return only even elements from the original array:
arr = np.array([1, 2, 3, 4, 5, 6, 7])
filtered_arr = arr % 2 == 0
newarr = arr[filtered_arr]
print(filtered_arr)
print(newarr)
###Output
[False  True False  True False  True False]
[2 4 6]
###Markdown
Random
###Code
x = np.random.randint(100)
x
randomArr = np.random.randint(1,100,size=(4,5))
randomArr
randomArr = np.random.randint(1,100,size=(5))
randomArr
randomArr = np.random.randint(1,100,size=(4,5,10))
randomArr
# Use rand for floats
randomArr = np.random.rand(4,5,2,2)
randomArr
# Choice
np.random.choice(randomArr.reshape(-1))
np.random.choice(randomArr.reshape(-1),size=(3,50))
## Random Distribution
np.random.choice([3, 5, 7, 9], p=[0.1, 0.3, 0.6, 0.0], size=(3, 5))
arr = np.array([1,2,3,4,5])
np.random.shuffle(arr)
print(arr)
x = np.random.randint(100,200,size=(2,2))
x
np.random.permutation(x)
x
## Plotting
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
sns.distplot(np.random.randint(1,100,size=(10)))
sns.distplot([0, 1, 2, 3, 4, 5])
sns.distplot([0, 1, 2, 3, 4, 5],hist=False)
## Normal Distribution
# loc - mean
# scale - standard deviation
# size - size of array
normalDistribution = np.random.normal(loc=12,scale=1.2,size=200)
normalDistribution
sns.distplot(np.random.normal(loc=0,scale=1,size=1000), hist=False)
## Binomial Distribution
# n - number of trials.
# p - probability of success on each trial (e.g. 0.5 for a coin toss).
# size - The shape of the returned array
binomialDist = np.random.binomial(n=10,p=0.5,size=10000)
binomialDist
sns.distplot(binomialDist,hist=True, kde=False)
sns.distplot(np.random.normal(loc=50, scale=5, size=1000), hist=False, label='normal')
sns.distplot(np.random.binomial(n=100, p=0.5, size=1000), hist=False, label='binomial')
## Poisson Distributition
# lam - rate, i.e. the expected number of occurrences
# size - shape of returned array
poissonDist = np.random.poisson(lam=2,size=50)
poissonDist
sns.distplot(np.random.poisson(lam=2,size=500),hist=False)
sns.distplot(np.random.normal(loc=50, scale=7, size=1000), hist=False, label='normal')
sns.distplot(np.random.binomial(n=100, p=0.5, size=1000), hist=False, label='binomial')
sns.distplot(np.random.poisson(lam=50, size=1000), hist=False, label='poisson')
plt.legend()
# Uniform Distribution
# a = lower bound
# b = upper bound
# size = shape of the array
uniformDist = np.random.uniform(-1,1,size=(1000))
uniformDist
sns.distplot(uniformDist,hist=False)
## Logistic Distributition
# loc - mean
# scale - scale parameter (proportional to the standard deviation)
# size - shape of array
logisiticDist = np.random.logistic(loc=1,scale=2,size=(2,3))
logisiticDist
sns.distplot(np.random.logistic(size=1000), hist=False)
np.log2(np.arange(1,100))
np.sin(np.pi/2)
import pandas as pd
data = {
"X": np.random.normal(loc=5,scale=50,size=100),
"Y": np.random.poisson(lam=5,size=100),
}
df = pd.DataFrame(data)
df
sns.scatterplot(df.X,df.Y)
df.index
df.loc[1]
df.info()
df.describe().T
df = pd.read_csv('testDataSet.csv')
df.head()
df.info()
df.describe().T
# dropping NA
df = df.dropna()
df.info()
df.Date = pd.to_datetime(df.Date)
df
df.duplicated()
df.drop_duplicates(inplace=True)
df.duplicated()
df.corr()
plt.figure(figsize=(10,10))
sns.heatmap(df.corr(),annot=True)
###Output
_____no_output_____
###Markdown
###Code
# import the library
import numpy as np
# Create an array
# We will use this kind of structure when working with machine learning
Array = np.array( [10, 20, 30, 40, 50])
Array
# type
type( Array )
Duas = np.array(
[[10, 9, 8, 7, 6],
[5, 4, 3, 2, 1]]
)
print( Duas )
Tres = np.array(
[[15, 14, 13, 12, 11],
[10, 9, 8, 7, 6],
[5, 4, 3, 2, 1]]
)
print( Tres )
Quatro = np.array(
[[20, 19, 18, 17, 16],
[18, 17, 16, 15, 14],
[13, 12, 11, 10, 9],
[8, 7, 6, 0, 4]]
)
print( Quatro )
# Processing experiment
# if the dataset is very large, it is better to use np.arange
Extressando_MP = np.arange( 100000 )
len( Extressando_MP)
# 1st test
%time for loop in Extressando_MP: pass
Extressando_MP = np.arange( 100000 )
Extressando_MP_02 = np.arange( 500000 )
Extressando_MP_03 = np.arange( 10000000 )
Extressando_MP_04 = np.arange( 50000000 )
print( len( Extressando_MP ), len( Extressando_MP_02 ), len( Extressando_MP_03 ), len( Extressando_MP_04 ))
# 3rd test
%time for loop in Extressando_MP_03: pass
# 4th test
%time for loop in Extressando_MP_04: pass
# numpy is excellent for processing a very large dataset; it pays off to convert that specific column into an array and do the processing there. The performance is better than running it in pandas.
# Accessing the arrays
# 1st position
Array[0]
# last position
Array[-1]
#range
Array[2:]
# Operations
print( Array[0], Array[1] )
Array[0] + Array[1]
# subtract
Array[0] - Array[1]
# multiply
Array[0] * Array[1]
# divide
Array[0] / Array[1]
# Logical operations
Array[0] == Array[1]
Array[0] >= Array[1]
Array[0] <= Array[1]
# Check the dimensions (same as in pandas)
Array.shape
Duas.shape
Tres.shape
Quatro.shape
#loop
for loop in Array:
print( loop )
for loop in Duas:
print( loop )
# we can use this approach when we want to iterate over an array inside another array
for loop in Duas:
for Interno in loop:
print( Interno )
# Arrays are used a lot in many situations
# we can also call pandas
import pandas as pd
pd.DataFrame( Array )
pd.DataFrame( Array, columns=['Vetor 1 Dimensao'])
###Output
_____no_output_____
###Markdown
Common numpy methods. Chinese documentation: https://www.numpy.org.cn/
###Code
import numpy as np
import matplotlib.pyplot as plt
np.abs?
lst = [3.13, 2.14, 0, 1, 2]
nd1 = np.array(lst)
print(nd1)
print(type(nd1))
lst = [
[3.14, 2.12, 0, 1, 2],
[1, 2, 3, 4, 5],
]
nd2 = np.array(lst)
print(nd2)
print(type(nd2))
###Output
[[3.14 2.12 0. 1. 2. ]
[1. 2. 3. 4. 5. ]]
<class 'numpy.ndarray'>
###Markdown
Commonly used random functions
###Code
nd3 = np.random.random([3, 3])
print(nd3)
print(nd3.shape)
###Output
[[0.70872183 0.17401859 0.43962316]
[0.13469743 0.61092746 0.40973292]
[0.5983096 0.87898676 0.23632317]]
(3, 3)
###Markdown
Commonly used special matrix functions
###Code
nd4 = np.random.random([5, 5])
np.savetxt(X=nd4, fname="data.txt") # 暂存到文件中
nd5 = np.loadtxt('data.txt') # 从文件中读取
print(nd5)
print(np.arange(10))  # similar to range
print(np.arange(1, 3, 0.3))
print(np.linspace(0, 10, 10))  # generates linearly spaced values; since the start and end points are both included and 10 is the number of values, each step is actually (10-0)/(10-1)
print(np.linspace(0, 10, 11))
x = np.arange(0, 10)
y = np.logspace(0, 10, 10)
print(x)
print(y)
plt.plot(x, y)
plt.show()
np.random.seed(2019)
nd6 = np.random.random([10])
print(nd6)
print(nd6[3])
print(nd6[1:6:2])
print(nd6[::-2])
nd7 = np.arange(25).reshape([5, 5])
print(nd7)
print(nd7[1:3, :])  # the specified rows
print(nd7[:, 1:3])  # the specified columns
print(nd7[1:3, 1:3])
print(nd7[(nd7>3)&(nd7<10)])  # values within the specified range
nd8 = np.arange(25)
c1 = np.random.choice(nd8, size=(3, 4))  # randomly sample from the data, with shape (3, 4)
c2 = np.random.choice(nd8, size=(3, 4), replace=False)  # sample without replacement
c3 = np.random.choice(nd8, size=(3, 4), p=nd8/np.sum(nd8))  # the p parameter specifies the sampling probabilities
print(c1)
print(c2)
print(c3)
###Output
[[18 5 23 24]
[21 20 16 15]
[ 1 22 14 12]]
[[ 4 13 22 14]
[ 7 23 5 17]
[12 9 20 24]]
[[19 21 18 11]
[10 21 23 19]
[18 19 11 20]]
###Markdown
The element-wise product is the product of the corresponding elements of two matrices. The np.multiply function multiplies arrays or matrices element by element; the output has the same size as the inputs.
###Code
A = np.array([[1, 2], [-1, 4]])
B = np.array([[2, 0], [3, 4]])
print(A*B) # element-wise multiplication
print(np.multiply(A, B))
print(A*2.0)
X = np.random.rand(2,3)-0.5
def softmoid(x):
return 1/(1+np.exp(-x))
def relu(x):
return np.maximum(0, x)
def softmax(x):
    return np.exp(x)/np.sum(np.exp(x))
print("Input X:\n", X)
print("softmoid:\n", softmoid(X))
print("relu:\n", relu(X))
print("softmax:\n", softmax(X))
###Output
Input X:
[[ 0.11777241 0.27521445 -0.47348728]
[ 0.12686265 -0.36200244 -0.2044518 ]]
softmoid:
[[0.52940912 0.56837259 0.38379118]
[0.53167319 0.41047492 0.44906436]]
relu:
[[0.11777241 0.27521445 0. ]
[0.12686265 0. 0. ]]
softmax:
[7.80134161e-05 2.12062451e-04 5.76445508e-04 1.56694135e-03
4.25938820e-03 1.15782175e-02 3.14728583e-02 8.55520989e-02
2.32554716e-01 6.32149258e-01]
###Markdown
The dot product is also called the inner product; in NumPy it is written as np.dot.
###Code
X1 = np.array([[1, 2], [3, 4]])
X2 = np.array([[5, 6, 7], [8, 9 , 10]])
X3 = np.dot(X1, X2)
print(X3)
###Output
[[21 24 27]
[47 54 61]]
###Markdown
Commonly used functions for changing shape
###Code
nd = np.arange(10)
print(nd)
print(nd.reshape(2, 5))
print(nd.reshape(5, -1)) # use -1 to let NumPy compute the other dimension automatically
print(nd.reshape(-1, 5))
nd = np.arange(10)
print(nd)
print(nd.resize(2, 5)) # resize modifies the array in place and returns None rather than a new array
print(nd)
nd = np.arange(12).reshape(3, 4)
print(nd)
print(nd.T)
nd = np.arange(6).reshape(2, -1)
print(nd)
print(nd.ravel('F')) # flatten in column-major (Fortran) order
print(nd.ravel()) # flatten in row-major (C) order
# https://www.geeksforgeeks.org/differences-flatten-ravel-numpy/
print(np.ravel(nd))
print(np.ravel(nd, 'F'))
nd = np.arange(12).reshape(3, 4)
print(nd)
print(nd.flatten()) # convert the matrix into a vector (returns a copy)
nd = np.arange(3).reshape(3, 1)
print(nd.shape)
print(nd.squeeze().shape) # used to reduce dimensionality by removing axes of length 1
nd = np.arange(24).reshape(2, 3, 4)
print(nd.shape)
print(nd.transpose(1, 2, 0).shape) # permute (swap) the axes
###Output
(2, 3, 4)
(3, 4, 2)
###Markdown
Functions for merging arrays
###Code
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
c = np.append(a, b)
print(c)
a = np.arange(6).reshape(2, 3)
b = np.arange(6).reshape(2, 3)
c = np.append(a, b, axis=0) # merge along rows (axis=0)
print(c.shape)
print(c)
d = np.append(a, b, axis=1) # merge along columns (axis=1)
print(d.shape)
print(d)
a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
c = np.concatenate((a, b), axis=0) # merge along rows (axis=0)
print(a.shape, b.shape, c.shape)
print(c)
d = np.concatenate((a, b.T), axis=1) # merge along columns; the other dimensions must match, hence b.T
print(a.shape, b.T.shape, d.shape)
print(d)
a = np.array([[1, 2, 1], [3, 4, 2]])
b = np.array([[5, 6, 1], [7, 8, 4]])
c = np.stack((a, b), axis=0) # stack arrays or matrices along the specified axis
print(c)
print(a.shape, b.shape, c.shape)
data_train = np.random.randn(10000, 2, 3)
print(data_train.shape)
np.random.shuffle(data_train)
batch_size = 100
for i in range(0, len(data_train), batch_size):
x_batch_sum = np.sum(data_train[i:i+batch_size])
###Output
(10000, 2, 3)
###Markdown
Universal functions. Broadcasting rules: 1. All input arrays are aligned with the array whose shape is longest; missing dimensions are padded with 1 at the front. 2. The shape of the output array is the maximum of the input shapes along each axis. 3. An input array can be used in the computation only if the length of each of its axes either matches the corresponding output axis or equals 1; otherwise an error is raised. 4. When an input array has length 1 along some axis, the first (and only) set of values along that axis is used (copied) for the computation along that axis.
###Code
# broadcasting example
A = np.arange(0, 40, 10).reshape(4, 1)
B = np.arange(0, 3)
C = A + B
print(A, A.shape)
print(B, B.shape)
print(C, C.shape)
###Output
[[ 0]
[10]
[20]
[30]] (4, 1)
[0 1 2] (3,)
[[ 0 1 2]
[10 11 12]
[20 21 22]
[30 31 32]] (4, 3)
###Markdown
Slicing. Slicing a one-dimensional array works the same as slicing a list; the slice function can also be used, with the three parameters start, stop and step.
###Code
a = np.arange(10)
s = slice(2, 7, 2)
a[s]
a[2:7:2]
###Output
_____no_output_____
###Markdown
For multi-dimensional arrays, the indices are separated with a comma `,`; slices can likewise be used on each dimension.
###Code
a = np.arange(12).reshape((3, 4))
a
a[1:]
a[:, 1:3]
a[1:, 1:3]
###Output
_____no_output_____
###Markdown
Slicing can also use `...`, so that the selection tuple has the same length as the number of dimensions of the array.
###Code
a[1, ...] # select the second row along the first dimension
###Output
_____no_output_____
###Markdown
Indexing with a comma-separated sequence of index arrays: - the length of the sequence matches the number of dimensions of the multi-dimensional array - every index array in the sequence has the same length
###Code
a = np.arange(16).reshape((4, 4))
a
a[[0, 2], [3, 1]]
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
#single dimensional numpy array
n1 = np.array([1,2,3,4,5])
n1
type(n1)
#multi dimensional numpy array
n2 = np.array([ [6,7,8,9,10], [11,12,13,14,15] ])
n2
type(n2)
#initialize numpy array with all zeros
n3 = np.zeros( (1,2))
n3
n4 = np.zeros( (4,5))
n4
#initializing numpy array filled with a constant value
n5 = np.full( (2,3), 10)
n5
#initialize numpy array within a range
n6 = np.arange(10,20)
n6
n7 = np.arange(10,21)
n7
n8 = np.arange( 10, 50 , 5)
n8
n9 = np.arange( 10, 51 , 5)
n9
# initialize numpy array with random numbers
n10 = np.random.randint(1, 100, 5)
n10
# Changing shape of an array
n11 = np.array( [ [1,2,3,4] , [5,6,7,8] ] )
n11
n11.shape
n11.shape = (4,2)
n11
#ADDITION OF NUMPY ARRAYS
array1 = np.array( [ 10, 20 ])
array2 = np.array( [ 30, 40 ])
# Addition of numpy array without axis means addition of all values in the array
np.sum( [array1 , array2])
# Addition of numpy array with axis = 0 means addition of all values in the array vertically
np.sum( [array1 , array2] , axis = 0 )
# Addition of numpy array with axis = 1 means addition of all values in the array horizontally
np.sum( [array1 , array2] , axis = 1 )
#JOINING OF NUMPY ARRAYS
n1 = np.array( [10,20,30] )
n2 = np.array( [40,50,60] )
# v-stack = vertical stack = stack arrays row-wise
np.vstack( [ n1, n2 ])
# h-stack = horizontal stack = stack arrays column-wise
np.hstack( [ n1, n2 ])
# column_stack = horizontal stack = stack arrays column-wise
np.column_stack( [ n1, n2 ])
###Output
_____no_output_____
###Markdown
Scientific Programming in Python Karl N. KirschnerBonn-Rhein-Sieg University of Applied SciencesSankt Augustin, Germany NumPy: NUMerical PYthon**Source**: https://numpy.org/doc/stable/Numpy is the foundation for- Pandas- Matplotlib- Scikit-learn- PyTorch- Excels at large **arrays** of data (i.e. VERY efficient) - RAM usage, and thus - Speed- Array: an n-dimensional array (i.e. NumPy's name: ndarray): - a collection of values that have 1 or more dimensions - 1D array --> vector - 2D array --> matrix - nD array --> tensor- All array data must be the same (i.e. homogeneous)- Can perform computations on entire arrays without the need of loops- Contains some nice mathematical functions/tools (e.g. data extrapolation) - will be covered in the SciPy lecture- Does not come by default with Python - must be installedComparisons to a regular list:1. Both are a container for items/elements2. Numpy allows for faster getting of items/elements (allows for faster mathematics), but3. Lists are faster at inserting new and removing existing items/elements Key Concept for Numpy1. Each element in an array must be the same type (e.g. floats) - allows for efficient usage of RAM - NumPy always knows what the content of the array is2. **Vectorizing operations** "This practice of replacing explicit loops with array expressions is commonly referred to as vectorization." - source: https://www.oreilly.com/library/view/python-for-data/9781449323592/ch04.html - do math operations all at once (i.e. does it one time only) on an ndarray3. Integrates with C, C++ and Fortran to improve performance - In this sense, NumPy is an intermediary between these low-level libraries and Python4. The raw array data is put into a contiguous (and fixed) block of RAM - good at allocating space in RAM for storing the ndarrays **More Information** for what is happening "under-the-hood": https://numpy.org/doc/stable/reference/internals.html**Citing Numpy**: (https://numpy.org/citing-numpy/)Harris, C.R., Millman, K.J., van der Walt, S.J. et al. Array programming with NumPy. Nature 585, 357–362 (2020).@Article{ harris2020array, title = {Array programming with {NumPy}}, author = {Charles R. Harris and K. Jarrod Millman and St{\'{e}}fan J. van der Walt and Ralf Gommers and Pauli Virtanen and David Cournapeau and Eric Wieser and Julian Taylor and Sebastian Berg and Nathaniel J. Smith and Robert Kern and Matti Picus and Stephan Hoyer and Marten H. van Kerkwijk and Matthew Brett and Allan Haldane and Jaime Fern{\'{a}}ndez del R{\'{i}}o and Mark Wiebe and Pearu Peterson and Pierre G{\'{e}}rard-Marchant and Kevin Sheppard and Tyler Reddy and Warren Weckesser and Hameer Abbasi and Christoph Gohlke and Travis E. Oliphant}, year = {2020}, month = sep, journal = {Nature}, volume = {585}, number = {7825}, pages = {357--362}, doi = {10.1038/s41586-020-2649-2}, publisher = {Springer Science and Business Media {LLC}}, url = {https://doi.org/10.1038/s41586-020-2649-2} }
###Code
import numpy as np
import pandas as pd
import time
import timeit
#%matplotlib inline
###Output
_____no_output_____
###Markdown
N-dimensional array object (i.e. ndarray)Let's create two objects:1. a regular list2. numpy array (via Array RANGE: https://numpy.org/doc/stable/reference/generated/numpy.arange.html), and then we can demonstrate which is faster using the timeit library.- timeit (to time code for performance): https://docs.python.org/3/library/timeit.html
###Code
my_list = list(range(100000))
my_list
my_array = np.arange(100000)
my_array
###Output
_____no_output_____
###Markdown
Now, lets multiply containers by 2, and do that math 10000 times.
###Code
def list_multiply(test_list):
return test_list*2
def numpy_multiply(test_array):
return test_array*2
###Output
_____no_output_____
###Markdown
First, let's see how the list performance is using the `time` library (https://docs.python.org/3/library/time.html):
###Code
start_time = time.process_time()
for _ in range(10000):
list_multiply(my_list)
stop_time = time.process_time()
print(f"Timing: {stop_time - start_time:0.1e} seconds")
###Output
_____no_output_____
###Markdown
Now for the Numpy array's performance:
###Code
start_time = time.process_time()
for _ in range(10000):
numpy_multiply(my_array)
stop_time = time.process_time()
print(f"Timing: {stop_time - start_time:0.1e} seconds")
###Output
_____no_output_____
###Markdown
The use of Numpy arrays is significantly faster than that for lists.**Side Note**: Notice how the underscore (i.e. `_`) was used above. There are several instances where one can use `_` in Python (see https://www.datacamp.com/tutorial/role-underscore-python), but in the above example it is employed1. to represent a variable that is not used further. timeithttps://docs.python.org/3/library/timeit.htmlA very good alternative library for testing performanceMultiply containers by 2, and do that math 10000 times.
###Code
timeit.timeit(lambda:list_multiply(my_list), number=10000)
timeit.timeit(lambda:numpy_multiply(my_array), number=10000)
###Output
_____no_output_____
###Markdown
Creating Numpy Arrays from ScratchIn the following we will create several arrays that we can use throughout this lecture. Conversion from listsLet's create 2 data lists with 5 data points each
###Code
list_1 = [6, 1, 6, 7, 9]
list_2 = [3, 5, 4, 2, 8]
list_2
###Output
_____no_output_____
###Markdown
Now create 2 arrays (each with a shape of (1, 5))
###Code
array_1 = np.array(list_1)
array_2 = np.array(list_2)
array_2
###Output
_____no_output_____
###Markdown
A slightly more complicated example...Create a **nested list**, with each sublist contains 5 data points:
###Code
list_3 = [[-6, 1, 6, 7, 9], [-5, 0, 2, 4, 3]]
list_3
###Output
_____no_output_____
###Markdown
Convert the nested lists to a Numpy array, with a shape of (2, 5)
###Code
array_3 = np.array(list_3)
array_3
###Output
_____no_output_____
###Markdown
Put `array_3` to memory - we will use it later in the lecture. Array shapes and dimensions 1D shape Recall that we created `array_1` via:`list_1 = [6, 1, 6, 7, 9]``array_1 = np.array(list_1)`Since we have it as a Numpy array, we can get's it shape:
###Code
array_1.shape
###Output
_____no_output_____
###Markdown
**Note** this would change if you added double brackets to the above declaration:`list_1 = [[6, 1, 6, 7, 9]]``array_1 = np.array(list_1)`As a demonstration:
###Code
example = [[6, 1, 6, 7, 9]]
test = np.array(example)
test.shape
###Output
_____no_output_____
###Markdown
nD shapeUse `array_3` as an example:
###Code
array_3
array_3.shape
array_3.ndim
###Output
_____no_output_____
###Markdown
Data types- https://numpy.org/doc/stable/reference/arrays.dtypes.html- https://numpy.org/doc/stable/user/basics.types.html
###Code
array_3.dtype
###Output
_____no_output_____
###Markdown
Reminder of using `type` to figure out what the object is that you are dealing with:
###Code
type(array_3)
###Output
_____no_output_____
###Markdown
More on creating new arrays An array that contains the same numberCreate an array with a shape of (3, 5), and fill it with an approximate pi value (e.g. 3.14):
###Code
np.full((3, 5), 3.14)
###Output
_____no_output_____
###Markdown
An array of integersCreate an array with a shape of (1, 30) from -10 to 50 using a stepping size of 2(similar to built-in `range` function)- np.arrange: https://numpy.org/doc/stable/reference/generated/numpy.arange.html
###Code
np.arange(-10, 52, 2)
###Output
_____no_output_____
###Markdown
An array of floatsCreate an array that contains 10 evenly spaced values between -1 and 1- numpy's linspace: https://numpy.org/devdocs/reference/generated/numpy.linspace.html
###Code
np.linspace(-1, 1, 10)
###Output
_____no_output_____
###Markdown
An array of random numbersCreate an array with random, continuously distributed values between 0 and 1- random.random_sample function: https://numpy.org/doc/stable/reference/random/generated/numpy.random.random_sample.htmlnumpy.random.random_sampleAn array with a shape of (3,):
###Code
np.random.random_sample(3)
###Output
_____no_output_____
###Markdown
An array with a shape of (3, 4):
###Code
np.random.random_sample((3, 4))
###Output
_____no_output_____
###Markdown
Accessing arrays One dimensional arrayLet's look at the (5,) `array_1` from above
###Code
array_1
###Output
_____no_output_____
###Markdown
Accessing the fourth item position (i.e. at an index of 3)
###Code
array_1[3]
###Output
_____no_output_____
###Markdown
A multidimensional array Now look at a 2D array (i.e. (2, 5) from above)
###Code
array_3
###Output
_____no_output_____
###Markdown
Access the fist sublist from the 2D array
###Code
array_3[0]
###Output
_____no_output_____
###Markdown
Access the second sublist and the fourth item positionarray([[-6, 1, 6, 7, 9], [-5, 0, 2, **4**, 3]]
###Code
array_3[1, 3]
###Output
_____no_output_____
###Markdown
Slicing an arrayDemo using `array_3[0]` and slicing via- [0:1]- [1:2]- [0:2]- [0:3] Slice to obtain the first nested array (same as `array_3[0]`):
###Code
array_3[0:1]
###Output
_____no_output_____
###Markdown
Slice to obtain the second nested array
###Code
array_3[1:2]
###Output
_____no_output_____
###Markdown
Slice to obtain the entire array
###Code
array_3[0:2]
###Output
_____no_output_____
###Markdown
Notice that we can specify upper numbers that go beyond the array:
###Code
array_3[0:6]
###Output
_____no_output_____
###Markdown
Filter (search) for elements- numpy arrays are not indexed like a list, so the more typical filtering/searching methods are not available (e.g. list comprehension)- numpy.where is used instead: https://numpy.org/doc/stable/reference/generated/numpy.where.htmlHow might we filter a regular Python list:
###Code
list_4 = [-6, 1, 6, 7, 9, -5, 0, 2, 4, 3]
[number for number in list_4 if number < 0]
###Output
_____no_output_____
###Markdown
**Side note**: the above "list comprehension" approach replaces the following code. This is more concise, but sometimes you might lose readability.
###Code
filtered_list = []
for number in list_4:
if number < 0:
filtered_list.append(number)
filtered_list
###Output
_____no_output_____
###Markdown
Now, how would we filter the `array_3` Numpy array for values less than 0- `np.where`: https://numpy.org/doc/stable/reference/generated/numpy.where.html
###Code
array_3
negative_items = np.where(array_3 < 0)
array_3[negative_items]
###Output
_____no_output_____
###Markdown
Flatten a multidimensional array & conversion to a listCollapsed a nD array into 1D:- `ndarray.flatten()`: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.flatten.html
###Code
array_3.flatten()
###Output
_____no_output_____
###Markdown
Convert the results to a list:- 'ndarray.tolist()`: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.tolist.html?highlight=tolist1D Numpy array to a list:
###Code
array_1
array_1.tolist()
###Output
_____no_output_____
###Markdown
nD Numpy array to a list:
###Code
array_3.flatten().tolist()
###Output
_____no_output_____
###Markdown
Joining arrays Multiple arrays with the same dimensions
###Code
array_1
array_2
###Output
_____no_output_____
###Markdown
**Concatenate**: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.htmlMultiple 1D arrays will create a single larger 1D array
###Code
np.concatenate([array_1, array_2, array_1], axis=0)
###Output
_____no_output_____
###Markdown
Multiple nD arrays, along their first axis (i.e. **axis=0**) - conceptually, this is like adding items to existing columns in a table (or as an alternative perspective - **adding more rows**)Let's join `array_3` to itself three times.
###Code
array_3
array_big = np.concatenate([array_3, array_3, array_3], axis=0)
array_big
array_big.shape
###Output
_____no_output_____
###Markdown
Okay, we can present a Numpy array in a more aesthetically pleasing way. Use Pandas to print out the table in a more human (i.e. a scientist) readable form
###Code
pd.DataFrame(array_big)
print(pd.DataFrame(array_big))
###Output
_____no_output_____
###Markdown
Multiple nD arrays, along their second axis (i.e. **axis=1**) - conceptually, this is like adding items to existing rows in a table (or as an alternative perspective - **adding more columns**)
###Code
pd.DataFrame(array_3)
## Multiple nD arrays, along their second axis
array_long = np.concatenate([array_3, array_3, array_3], axis=1)
array_long
array_long.shape
pd.DataFrame(array_long)
###Output
_____no_output_____
###Markdown
Multiple arrays with inconsistent (i.e. mixed) dimensions- must pay attention to the dimensions- vertical stacked- horizontal stacked Vertical stacked- nD arrays must be (x, N) and (y, N) where N is the same valueBelow we will combine `array_3` (shape: (2, 5)) with `array_big` (shape: (6, 5)).
###Code
array_3
array_big
array_vstack = np.vstack([array_3, array_big])
array_vstack
array_vstack.shape
###Output
_____no_output_____
###Markdown
Now logically, we can also do this with our array_1 (shape: (5,))
###Code
array_1
array_vstack = np.vstack([array_1, array_3])
array_vstack
array_vstack.shape
###Output
_____no_output_____
###Markdown
When would this not work?Demo when the arrays (ie. (x, N) and (y, N)) have different N values
###Code
array_4 = np.array([[99, 99, 99, 99]])
array_4
print(array_3.shape)
print(array_4.shape)
np.vstack([array_3, array_4])
###Output
_____no_output_____
###Markdown
Horizontal Stacked- nD arrays must be (M, x) and (M, y) where M is the same valueUsing our examples, we need a new array that has (2, x) values since array_3 is (2, y)
###Code
array_5 = np.array([[99], [99]])
array_5
array_3
print(array_3.shape)
print(array_5.shape)
array_hstack = np.hstack([array_5, array_3])
array_hstack
array_hstack.shape
###Output
_____no_output_____
###Markdown
When would this not work?Demo when the arrays (ie. (M, x) and (M, y)) have different M values
###Code
array_big
print(array_4.shape)
print(array_big.shape)
array_hstack = np.hstack([array_4, array_big])
array_hstack
###Output
_____no_output_____
###Markdown
Math with ndarrays- np.add and np.subtract- np.multiply and np.divide- np.power- np.negative (multiplies x by -1) Math performed on a single array
###Code
array_3
###Output
_____no_output_____
###Markdown
Method 1: numpy a function
###Code
np.add(array_3, 5)
###Output
_____no_output_____
###Markdown
Method 2: using Python3's built-in function
###Code
array_3 + 5
###Output
_____no_output_____
###Markdown
Note- Using numpy vs built-in functions doesn't matter **too much** here (in this example) since you are performing the action on a (small) numpy array- Nevertheless, you should try to maximize the use of Numpy functions when speed is important
###Code
repeat = 1000000
timeit.timeit(lambda:np.add(array_3, 5), number=repeat)
timeit.timeit(lambda:array_3 + 5, number=repeat)
###Output
_____no_output_____
###Markdown
Math operations between arrays- math operations between equal-sized arrays are done element-wise Add and subtract
###Code
array_3 + array_3
array_3 - array_3
###Output
_____no_output_____
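###Markdown
For completeness, a minimal sketch of the explicit NumPy functions listed earlier (equivalent to the operator forms above):
###Code
print(np.subtract(array_3, array_3))  # same as array_3 - array_3
print(np.negative(array_3))           # multiplies every element by -1
###Output
_____no_output_____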
###Markdown
Multiplication
###Code
array_3 * array_3
data_6 = [[-1, -1, -1, -1, -1], [-1, -1, -1, -1, -1]]
array_6 = np.array(data_6)
array_3 * array_6
###Output
_____no_output_____
###Markdown
Division
###Code
1/array_3
###Output
_____no_output_____
###Markdown
Raise to a power
###Code
array_3**3
###Output
_____no_output_____
###Markdown
Absolute valuesUsing a Numpy function
###Code
np.absolute(array_3)
timeit.timeit(lambda:np.absolute(array_3), number=repeat)
###Output
_____no_output_____
###Markdown
Python3's built-in function
###Code
abs(array_3)
timeit.timeit(lambda:abs(array_3), number=repeat)
###Output
_____no_output_____
###Markdown
Interestingly, in this case, the built-in `abs` function seems to have the edge over numpy (at least on my local machine). Booleans
###Code
print(array_3 == -6)
###Output
_____no_output_____
###Markdown
Trigonometric- np.sin()- np.cos()- np.arcsin()- etc. Trigonometry on a single input value
###Code
np.sin(-6)
###Output
_____no_output_____
###Markdown
Trigonometry on a numpy array
###Code
np.sin(array_3)
###Output
_____no_output_____
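###Markdown
A minimal sketch of the inverse functions (e.g. np.arcsin) listed above, which undo the forward trigonometric functions:
###Code
angles = np.array([0.0, 0.5, 1.0])   # arbitrary angles in radians
print(np.arcsin(np.sin(angles)))     # recovers the original angles
print(np.arccos(np.cos(angles)))
###Output
_____no_output_____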
###Markdown
Exponents and logarithms
###Code
x = np.array([2, 3])
## Note that the resulting lists aren't separated by a comma
## (as seen above) due to the print statement
print(f"x = {x}")
print(f"2^x = {np.exp2(x)} (i.e. 2^2 and 2^3)")
print(f"10^x = {np.power(10, x)} (i.e. 10^2 and 10^3)")
print(f"e^x = {np.exp(x)} (i.e. e^2 and e^3)")
###Output
_____no_output_____
###Markdown
(Recall that you reverse the exponential calculations using log functions.)Taking the above exponential output and operating on it using log functions:
###Code
x = [4., 8.]
print(f"log2(x) = {np.log2(x)}")
x = [100., 1000.]
print(f"log10(x) = {np.log10(x)}")
x = [7.3890561, 20.08553692]
print(f"ln(x) = {np.log(x)}")
###Output
_____no_output_____
###Markdown
A more complex example Convert temperature values from Celsius to Fahrenheit. Data set: Average temperature in Bonn throughout the calendar year (i.e. January ---> December)
###Code
data_celcius = [2.0, 2.8, 5.7, 9.3, 13.3, 16.5, 18.1, 17.6, 14.9, 10.5, 6.1, 3.2]
array_celcius = np.array(data_celcius)
array_celcius
array_fahrenheit = array_celcius*(9/5) + 32
###Output
_____no_output_____
###Markdown
Visualize the results for clarity using Pandas' built-in function:- convert `np.array` to `pd.DataFrame`
###Code
pd.DataFrame(array_celcius).plot(kind='line', title='Celcius', fontsize=16, legend=None)
pd.DataFrame(array_fahrenheit).plot(kind='line', title='Fahrenheit', fontsize=16, legend=None)
###Output
_____no_output_____
###Markdown
Numpy statistics Side note: numpy's random number generators- generators (e.g. normal/gaussian, geometric, binomial) https://numpy.org/doc/1.18/reference/random/generator.htmlTwo examples will be given as demonstrations Geometric distributionGenerate a random distribution that contains 10 attempt entries that have a success probability of 60%, where the distribution itself is governed by a geometric distribution:
###Code
random_geom = np.random.geometric(0.60, size=10)
random_geom
###Output
_____no_output_____
###Markdown
Normal distributionCreate an array (3,3) of Gaussian distributed random values: mean=10.0 and standard deviation=0.1
###Code
random_data = np.random.normal(10.0, 0.1, (3, 3))
random_data
###Output
_____no_output_____
###Markdown
Visualize the random array for clarity(demo by repeating the code above and replotting)
###Code
pd.DataFrame(random_data).plot(kind='line', title='Random normal data', fontsize=16, legend=None)
###Output
_____no_output_____
###Markdown
Let's also prove to ourselves that our mean is close to 10 and the standard deviation is close to 0.1
###Code
np.mean(random_data)
np.std(random_data)
###Output
_____no_output_____
###Markdown
Details concerning standard deviation and varianceThis mostly comes from the statistical lecture (which may have been skipped).(Why can't I reproduce results using spreadsheets or Matlab?)
###Code
data = [1, 2, 4, 5, 8]
###Output
_____no_output_____
###Markdown
variance- The Libreoffice spreadsheet gives a variance for '=VAR(1,2,4,5,8)' of 7.5- I believe Matlab also gives 7.5Using the statistics library
###Code
import statistics
statistics.variance(data)
###Output
_____no_output_____
###Markdown
These above results are actually termed 'the sample variance.'However, if you use NumPy by simply typing:
###Code
np.var(data)
###Output
_____no_output_____
###Markdown
In this case there is a "hidden" variable called `ddof` ("Delta Degrees of Freedom") - the denomenator is divided by 'N -ddof'https://numpy.org/doc/1.18/reference/generated/numpy.var.html?highlight=variance- population: "ddof=0 provides a maximum likelihood estimate of the variance for normally distributed variables"- sample: "ddof=1 provides an unbiased estimator of the variance of a hypothetical infinite population"The same is true for standard deviation.Population variance:
###Code
np.var(data, ddof=0)
###Output
_____no_output_____
###Markdown
Sample variance (always larger than the population variance):
###Code
np.var(data, ddof=1)
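# checking the denominators by hand (a sketch using the same data):
# ddof=0 divides by N, ddof=1 divides by N - 1
mean = sum(data) / len(data)
squared_dev = [(d - mean)**2 for d in data]
print(sum(squared_dev) / len(data))        # 6.0 -> population variance (ddof=0)
print(sum(squared_dev) / (len(data) - 1))  # 7.5 -> sample variance (ddof=1)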
###Output
_____no_output_____
###Markdown
Standard deviation demo LibreOffice gives '=STDEV(1,2,4,5,8)' = 2.7386127875, and the statistics library gives:
###Code
statistics.stdev(data)
###Output
_____no_output_____
###Markdown
Numpy's sample standard deviation
###Code
np.std(data, ddof=1)
###Output
_____no_output_____
###Markdown
Numpy's population standard deviation
###Code
np.std(data, ddof=0)
###Output
_____no_output_____
###Markdown
**Take home message**: you should always take a look at NumPy's manual to make sure you are doing what you think you are doing -- keep an eye out for default settings (e.g. ddof=0). Additional resource to further learn and test your knowledge: https://github.com/rougier/numpy-100 And finally, some weirdness The following should provide a mean value of 1.0 (i.e. the sum of the numbers is 4, divided by 4)
###Code
large_numbers_list = [1e30, 1, 3, -1e30]
statistics_mean = statistics.mean(large_numbers_list)
statistics_mean
np_mean = np.mean(large_numbers_list)
np_mean
np_sum = np.sum(large_numbers_list)
np_sum
###Output
_____no_output_____
###Markdown
This appears to be coming from the data type
###Code
np.array(large_numbers_list).dtype
np_sum = np.mean(large_numbers_list, dtype=np.float64)
np_sum
np_sum = np.mean(large_numbers_list, dtype=np.int8)
np_sum
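# sketch: the surprise is floating-point cancellation (1e30 + 1 is rounded back to 1e30),
# not a numpy bug; math.fsum tracks the lost low-order bits and recovers the exact sum
import math
math.fsum(large_numbers_list) / len(large_numbers_list)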
###Output
_____no_output_____
###Markdown
Numpy working with arrays in Python Prof. Robert Quimby© 2018 Robert Quimby In this tutorial you will... * create 1- and 2-dimensional `numpy` arrays* see how to access individual elements and array slices* perform basic mathematical operations on each array element* learn about some of the most useful `numpy` functions* get a quick introduction to `numpy` structured arrays Full documentation * http://www.numpy.org/* [numpy tutorial](https://docs.scipy.org/doc/numpy/user/quickstart.html) Motivation
###Code
list1 = [1, 2, 3, 4, 5]
list2 = [3, 4, 2, 6, 4]
# compute the item-wise list sum
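# with plain lists this needs an explicit loop or comprehension (a sketch):
# [a + b for a, b in zip(list1, list2)]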
###Output
_____no_output_____
###Markdown
Load the Numpy package `numpy` array operations
###Code
a1 = np.array([1, 2, 3, 4, 5])
a2 = np.array([3, 4, 2, 6, 4])
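# the item-wise sum from the motivation example is now just the + operator (sketch)
a1 + a2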
###Output
_____no_output_____
###Markdown
Array indexing and slicing
###Code
print(a1)
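# a slicing sketch: elements at indices 1 through 3
a1[1:4]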
###Output
_____no_output_____
###Markdown
2-D arrays `numpy` arrays are mutable
###Code
a = np.array([1, 2, 3, 4, 5])
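# mutation sketch: assigning through an index changes the array in place
a[0] = 99
a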
###Output
_____no_output_____
###Markdown
Useful `numpy` functions
###Code
np.arange(0, 10, 2)      # evenly spaced integers: start, stop (exclusive), step
np.linspace(0., 1., 5)   # 5 evenly spaced values from 0 to 1 (inclusive)
np.zeros(3)              # array of zeros
np.ones((2, 3))          # 2x3 array of ones
###Output
_____no_output_____
###Markdown
Trig functions
###Code
np.cos(np.linspace(0., np.pi, 5))   # element-wise cosine
###Output
_____no_output_____
###Markdown
Useful `numpy` array attributes
###Code
a2.shape
a2.size
###Output
_____no_output_____
###Markdown
Useful `numpy` array methods
###Code
a1.mean()
print()
a2.sum()
a2.min()
a2.argmin()
###Output
_____no_output_____
###Markdown
Truth arrays
###Code
a1 > 3
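# the boolean result can be used as a mask to select elements (sketch)
a1[a1 > 3]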
###Output
_____no_output_____
###Markdown
Structured arrays For more on `numpy` structured arrays see:* https://docs.scipy.org/doc/numpy/user/basics.rec.htmlstructured-arrays
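As a minimal sketch (the field names `name` and `mag` below are only illustrative, not from the lecture):
```python
import numpy as np
# one record per object: a 10-character string plus a float
catalog = np.zeros(3, dtype=[("name", "U10"), ("mag", "f8")])
catalog["name"] = ["obj1", "obj2", "obj3"]
catalog["mag"] = [18.2, 19.5, 17.9]
catalog[catalog["mag"] < 19]   # select whole records by a field value
```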
###Code
dir(a2)
help(a2)
###Output
_____no_output_____
###Markdown
Let's try solving the system of linear equations Ax = b
###Code
# import numpy under the name np; numpy functions can then be used as np.***
import numpy as np
# a matrix
A = np.array([[1,2,3],[4,5,6],[7,8,9]])
A
# display it
print(A)
# a vector
x = np.array([[1],[2],[3]])
print(x)
# the matrix-vector product
b = np.dot(A, x)
print(b)
# this one is different (element-wise multiplication with broadcasting)
A * x
# running it with numbers that make the difference obvious
x = np.array([[10],[100],[1000]])
A * x
# multiplication by a constant
A * 2
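# Sketch: actually solving a linear system A2 y = b2 with numpy
# (the 3x3 A above is singular, so a small invertible example is used here)
A2 = np.array([[2., 1.], [1., 3.]])
b2 = np.array([3., 5.])
print(np.linalg.solve(A2, b2))   # -> [0.8 1.4]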
###Output
_____no_output_____
###Markdown
Initializing matrices
###Code
# initialize with zeros
A = np.zeros((3,4))
print(A)
# initialize with random numbers (uniform between 0.0 and 1.0)
A = np.random.rand(2,3)
print(A)
# initialize with random numbers (standard normal distribution: mean 0, standard deviation 1)
A = np.random.randn(2,3)
print(A)
# while we are at it, let's plot the standard normal distribution
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
x = np.random.randn(10000)
y = plt.hist(x, bins=50) # bins is the number of bins
plt.show()
###Output
_____no_output_____
###Markdown
Eigenvalues and eigenvectors
###Code
A = np.array([[6,2],[2,3]])
la, v = np.linalg.eig(A)
print(la)
print(v)
print(2/np.sqrt(5), 1/np.sqrt(5))
print(-1/np.sqrt(5), 2/np.sqrt(5))
print(v[0])
print(v[:,0])
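# check (sketch): the columns of v are the eigenvectors, so A v[:,0] should equal la[0] v[:,0]
print(np.allclose(np.dot(A, v[:, 0]), la[0] * v[:, 0]))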
###Output
[ 7. 2.]
[[ 0.89442719 -0.4472136 ]
[ 0.4472136 0.89442719]]
0.894427191 0.4472135955
-0.4472135955 0.894427191
[ 0.89442719 -0.4472136 ]
[ 0.89442719 0.4472136 ]
###Markdown
Inverse matrix, transpose, inner product
###Code
A = np.array([[1,2],[3,4]])
print(A)
invA = np.linalg.inv(A)
print(invA)
np.dot(invA, A)
a = np.array([1,2,3])
b = np.array([10,20,30])
np.dot(a,b)
###Output
_____no_output_____
###Markdown
NumPy (www.numpy.org)NumPy is important in scientific computing; it is coded in both Python and C (for speed). A few of its important features: a powerful N-dimensional array object; sophisticated broadcasting functions; tools for integrating C/C++ and Fortran code; useful linear algebra, Fourier transform, and random number capabilities. Next, we will introduce Numpy arrays, which are related to the data structures.In order to use the Numpy module, we need to import it first. A conventional way to import it is to use "np" as a shortened name using```pythonimport numpy as np```Numpy has a detailed guide for users migrating from Matlab. Just google 'Numpy for Matlab Users'
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
If the previous line produces an error, then you need to install numpy. Please type```python!pip install numpy ```
###Code
# to create an array, we use the numpy function array
x = np.array([1,2,3])
x
###Output
_____no_output_____
###Markdown
Arrays are entered by rows, each row is defined as a list. To create a 2d array, simply use nested lists
###Code
y = np.array([[1,2,3],[4,5,6]])
y
###Output
_____no_output_____
###Markdown
The arrays created with numpy are objects and have many attributes associated with them. For example, the shape of an array can be found with *shape*, and its size with *size*
###Code
y.shape
y.size
x.shape
x.size
###Output
_____no_output_____
###Markdown
You can access the elements in the array by index. There are multiple ways to access the element in the array
###Code
x[0], x[1],x[2]
x[3]
y[0],y[0][0],y[0][1],y[0][2]
y[1],y[1][0],y[1][1],y[1][2]
y[0,0],y[0,1],y[0,2],y[1,0],y[1,1],y[1,2]
###Output
_____no_output_____
###Markdown
In this form, the first index represents the row and the second index represents the column. You can also use slices to obtain a section of the array:
###Code
# What result will you obtain after this operation?
y[:,:2]
# What result will you obtain after this operation?
y[:,-2:]
# you can also access multiple rows or columns by index
y[:,[0,1]]
y[:,[0,2]]
###Output
_____no_output_____
###Markdown
NumPy includes methods to generate arrays that have a structure. - *arange* -> generates arrays that are in order and evenly spaced,- *linspace* -> generates an array of n equally spaced elements between a defined beginning and end point
###Code
# np.arange requires three parameters:
# The starting point, the end point, and the increment
# NOTE: the end point is not inclusive
np.arange(0.5, 3, 0.5)
large_array = np.arange(0,2000,1)
large_array
large_array.size
# np.linspace requires three parameters:
# The starting point, the end point, and
# the number of elements
# NOTE: the end point is inclusive
np.linspace(0.5, 3, 6)
np.linspace(0, 1999, 2000)
###Output
_____no_output_____
###Markdown
NumPy includes some predefined arrays that can make your life easier
###Code
np.zeros((5,5))
np.zeros_like(y)
np.zeros((3,))
np.zeros_like(x)
np.ones((5, 5))
np.empty((5, 1))
np.empty((1,5))
np.empty((5))
###Output
_____no_output_____
###Markdown
You can use the assignment operator to modify one or multiple elements in your array
###Code
# if you don't provide the increment, np.arange will
# use a default value of 1
a = np.arange(1, 7)
a
#to change the element in the index position 4, we can do
a[4] = 10
a
# to change the elements from 4 to the end we can do
a[4:] = [45,32]
a
#python will let you know if you made a mistake
a[4:] = [43,32,55]
# exercise
# to change the elements from index 2 to 5 (inclusive) to zero.
# we can do??
a[2:6] = np.zeros((1,4))
a
###Output
_____no_output_____
###Markdown
Exercise:Create a zero array b with shape 2 by 2, and set $$ 𝑏=\begin{bmatrix}1&2 \\ 3& 4\end{bmatrix}$$using array indexing.
###Code
b = np.zeros((2,2))
b
b[0,0] = 1
b[0,1] = 2
b[1,0] = 3
b[1,1] = 4
b[0] = [1,2]
b[1] = [3,4]
b
###Output
_____no_output_____
###Markdown
NumPy has powerful broadcasting abilities. You can do mathematical operation with arrays of different sizes and NumPy will take care of the operation if possible Operations with scalars
###Code
b = np.array([[0,1],[2,3]])
c = 2
b+c
b-c
b*c
b/c
b**c
###Output
_____no_output_____
###Markdown
Operations between arrays
###Code
b = np.array([[0,1],[2,3]])
d = np.array([[4,5],[6,7]])
b+d
b-d
b*d
b/d
b**d
###Output
_____no_output_____
###Markdown
The *, /, and ** operations are operating on an element by element basis. Operations between arrays of different sizes
###Code
b = np.array([[0,2],[3,4]])
d = np.array([[4],[5]])
b+d
###Output
_____no_output_____
###Markdown
Can you explain what is going on?
###Code
b-d
b*d
b/d
b**d
###Output
_____no_output_____
###Markdown
Matrix Multiplication
###Code
b = np.array([[0,1],[2,3]])
d = np.array([[4,5],[6,7]])
e = np.array([[4],[5]])
f = np.array([[4,5]])
b@d, np.matmul(b,d)
b@e
# NumPy will tell you when you make a mistake
b@f
# the .T atributes computes the transpose of a matrix
# it has precedence over other operations
[email protected]
###Output
_____no_output_____
###Markdown
NumPy can also apply logical operations between arrays and scalars or between two arrays of the same size
###Code
x = np.array([1, 2, 4, 5, 9, 3])
y = np.array([0, 2, 3, 1, 2, 3])
x>3
###Output
_____no_output_____
###Markdown
Python can index elements of an array that satisfy a logical expression.
###Code
x[x>3]
# you can also use multiple conditions
x[np.logical_or(x<3,x>=5)]
x[np.logical_and(x<=9,x>=5)]
###Output
_____no_output_____
###Markdown
you can also use the assignment operator to modify an array based on conditions
###Code
y = x[x>3]
y
y[y>=9] = 0
y
###Output
_____no_output_____
###Markdown
Numpy* Numpy is the fundamental package for high-performance scientific computing and data analysis. It is the basis for pandas and many other tools.* Main features of Numpy: * ndarray, a multi-dimensional array structure that is efficient and memory-saving * mathematical functions for fast operations on array data without loops * tools for reading/writing data on disk and for working with memory-mapped files * linear algebra, random number generation and Fourier transform capabilities * tools for integrating C, C++ and other code. Installation:```scriptpip install numpy```Import:```pythonimport numpy as np```Mirrors in China:* University of Science and Technology of China https://pypi.mirrors.ustc.edu.cn/simple/* Tsinghua https://pypi.tuna.tsinghua.edu.cn/simple Once installed, try the following code:
###Code
import numpy as np
np.array([1, 2, 3, 4, 5]) # 创建一个ndarray
for i in _: # the "_" here refers to the return value of the previous line
print(i)
###Output
_____no_output_____
###Markdown
Please compare the two related pieces of code above and below! ```np.array``` has the following advantages over ```list```:* faster computation than a list* smaller memory footprint than a list
###Code
# the following code shows the type of an np.array
import numpy as np
a = np.array([1, 2, 3, 4, 5])
print(type(a)) # numpy.ndarray
# the following code checks how much memory each object occupies
import sys
a = list(range(100))
print(sys.getsizeof(a)) # shows 856 bytes used
b = np.array(range(100))
print(sys.getsizeof(b)) # shows 504 bytes used
# create a price list and a quantity list from random numbers
import random
import numpy as np
# generate the price list
pricelist = [round(random.uniform(10.0, 100.0), 2) for i in range(20)]
pricelist_np = np.array(pricelist)
# generate the quantity list
numlist = [random.randint(1, 10) for i in range(20)]
numlist_np = np.array(numlist)
def getTotal(plist, nlist):
total = 0
for i, j in zip(plist, nlist):
total += i * j
return total
# total via a Python loop
getTotal(pricelist, numlist)
timeit getTotal(pricelist, numlist)
# total via the dot (inner) product
np.dot(pricelist_np, numlist_np)
timeit np.dot(pricelist_np, numlist_np)
timeit (pricelist_np * numlist_np).sum()
pricelist_np
# other ndarray arithmetic
pricelist_np + 1
# check the data type; all elements of an ndarray must share the same dtype
pricelist_np.dtype
###Output
_____no_output_____
###Markdown
Numpy's ndarray* dtype * bool_, int(8, 16, 32, 64), uint(8, 16, 32, 64), float(16, 32, 64) * type conversion: astype()* Creating an ndarray: np.array() * array(): converts a list into an array; dtype can be specified * arange(): the numpy version of range(); supports floats * linspace(): like arange(), but the third argument is the array length * zeros(): creates an array of zeros * ones(): creates an array of ones * empty(): creates an uninitialized array with arbitrary values * eye(): creates an identity matrix of the given size* Why use ndarray: * example 1: given several companies' market values in USD, compute their values in RMB * example 2: given the prices and quantities of the items in a shopping cart, compute the total* An ndarray can be multi-dimensional, but all elements must have the same type* Common attributes: * T: the transpose of the array * dtype: the element data type * size: the number of elements * ndim: the number of dimensions * shape: the size of each dimension
###Code
# create an ndarray with arange
import numpy as np
a = np.arange(100)
a.shape
# create a multi-dimensional array
import numpy as np
a = np.array([[1, 2, 3], [4, 5, 6]])
a.size
a.shape
a.ndim
a.T
###Output
_____no_output_____
###Markdown
A few computations with vectors. Angle between vectors: the dot product of the 2 vectors divided by the product of their norms$$ cos(\theta) = \frac{\overrightarrow{x}\overrightarrow{y}}{\|\overrightarrow{x}\|\|\overrightarrow{y}\|} $$Try it with v = [1, 4, 5] and u = [2 1 5]
###Code
def angle_between(v1, v2):
dot_pr = v1.dot(v2)
norms = np.linalg.norm(v1) * np.linalg.norm(v2)
return np.rad2deg(np.arccos(dot_pr/norms))
v = np.array([1, 4, 5])
u = np.array([2, 1, 5])
print(angle_between(v, u))
# compute the weighted average score over the exams
gArray.dot([0.3, 0.3, 0.4])
# scale the grades up
scaling = [1.1, 1.05, 1.03]
np.diag(scaling)
gArray.dot(np.diag(scaling))
# % of each exam score relative to the highest score (curving)
gArray.max(axis=0)
maxInExam = gArray.max(axis=0)
gArray.dot(np.diag(100/maxInExam)).round()
###Output
_____no_output_____
###Markdown
Linear Regression. We assume a linear relationship between the input variable (**X**) and the output (**y**). Each input variable is assigned a coefficient Beta (**β**); one more free coefficient, which lets the line move up and down, is called the intercept or **bias coefficient**. The simple linear regression function is written: $$y = \beta_0 + \beta_1 x$$"Learning" linear regression simply means **estimating the values of the coefficients of the above equation from the data we have**. Assumptions of Linear Regression 1. The relationship between the variables is linear 2. The data is not noisy; we should filter out outliers as much as possible 3. The model will overfit when the input variables are strongly correlated with one another 4. Results are better if the input variables follow a normal distribution; if not, transformations should be used to make the data closer to normal 5. Inputs should be rescaled to the range [0, 1] for better estimates. Formula for the Beta coefficients. In fact no "learning" is needed; they can be computed directly from the formulas$$ \beta_1 = \frac{\sum_{i=1}^{n} (x_i - mean(x)) * (y_i - mean(y))}{\sum_{i=1}^{n}(x_i - mean(x))^2} $$$$ \beta_0 = mean(y) - \beta_1 * mean(x)$$
###Code
# generate our own data with 300 points
x = np.arange(1,301)
y = np.random.normal(x + 2, 50)
plt.figure(figsize=(30,10))
plt.scatter(x[:100], y[:100])
plt.show()
beta1 = sum((x - np.mean(x))*(y-np.mean(y)))
beta1 = beta1 / sum((x - np.mean(x))**2)
beta0 = np.mean(y) - beta1 * np.mean(x)
print(beta0, beta1)
def pred(x):
return beta0 + beta1*x
y_preds = pred(x)
y_preds[:10]
plt.figure(figsize=(16,9))
plt.scatter(x,y, s=128, alpha=0.5, label='Y')
plt.plot(np.arange(1,301), y_preds, color='red', linewidth=5, label='Linear Predicted Y')
plt.show()
###Output
_____no_output_____
###Markdown
A simpler alternative. For $\beta_1$ there is another, simpler formula$$ \beta_1 = corr(x,y) * \frac{stdev(y)}{stdev(x)}$$
###Code
b1 = np.corrcoef(x,y)[0][1] * (np.std(y) / np.std(x))
b0 = np.mean(y) - b1 * np.mean(x)
print(b0, b1)
# note that corrcoef returns a 2x2 matrix
###Output
2.1643799335386404 1.0008259472474048
###Markdown
Scoring the model. There are many ways to evaluate this regression model; we will use the Root Mean Squared Error, computed as:$$ RMSE = \sqrt{\frac{\sum_{i=1}^{n}(p_i - y_i)^2}{n}} $$ where $p_i$ is the predicted value
###Code
def get_rmse(y_pred, y):
    return np.sqrt(np.mean((y_pred - y)**2))
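# usage sketch with the predictions computed above
print(get_rmse(y_preds, y))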
###Output
_____no_output_____
###Markdown
NumPy NumPy is an extension module for numerical computation with Python. It provides the fundamental data structures, namely matrices and multi-dimensional arrays. NumPy itself is implemented in C and, through its Python interface, makes it possible to carry out computations quickly. The modules SciPy, Matplotlib and Pandas build on these data structures, so NumPy forms the foundation of the scientific Python libraries. More about NumPy on the official website: http://numpy.org/ Downloading NumPy: pip (the package manager for Python modules on PyPI.org) is installed automatically with Python. pip stands for "pip installs packages", which matches the command syntax used to download Python modules.
###Code
# do not run this, since NumPy is already installed and the necessary permissions are missing
!pip3 install numpy
###Output
Requirement already satisfied: numpy in /Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages (1.17.4)
###Markdown
Using math
###Code
from math import *
zahlen = [1, 2, 3, 4, 5, 6]
ergebnis = []
for x in zahlen:
y = sin(x)
ergebnis.append(y)
print(ergebnis)
type(zahlen)
###Output
_____no_output_____
###Markdown
Using NumPy Arrays / vectors $zahlen = \left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ \end{array} \right)$ Note: the sine function `sin()` from the `math` module and the one from `numpy` are not the same! Python decides which sine function to call based on the type of `zahlen`.- `math` -> `list`- `numpy` -> `numpy.ndarray` Types of the NumPy values: the array `zahlen` contains only integers, so the dtype of the vector is set to `int64`. When computing the sine values of `zahlen`, the output `ergebnis` has the floating-point type `float64`. Defining the dtype of the arrays
###Code
# output a floating-point number
# output a complex number
###Output
_____no_output_____
###Markdown
Matrizen$M_1\ = \left( \begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \\ \end{array} \right)$ Anzeigen der Dimension der Matrix Spezielle Funktionen 3x3-Nullmatrix 3x4-Einheitsmatrix Nullvektor Einheitsvektor `arange()` und `linspace()` für Sequenzen von Zahlen Syntax: `arange(startwert, endwert, inkrement/schrittweite)`Hinweis: Wie in der `range()`-Funktion ist der Startwert inklusiv und der Endwert exklusiv. Syntax: `linspace(startwert, endwert, anzahl der arrays)` Operationen $x=\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right)$ $y=\left( \begin{array}{ccc} 2 \\ 4 \\ 6 \\ 8 \\ 10 \\ \end{array} \right)$ Addition$\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right) + \left( \begin{array}{ccc} 2 \\ 4 \\ 6 \\ 8 \\ 10 \\ \end{array} \right) = \left( \begin{array}{ccc} 3 \\ 6 \\ 9 \\ 12 \\ 15 \\ \end{array} \right)$ Subtraktion$\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right) - \left( \begin{array}{ccc} 2 \\ 4 \\ 6 \\ 8 \\ 10 \\ \end{array} \right) = \left( \begin{array}{ccc} -1 \\ -2 \\ -3 \\ -4 \\ -5 \\ \end{array} \right) $ Erweiterung$\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right) \cdot 4 = \left( \begin{array}{ccc} 4 \\ 8 \\ 12 \\ 16 \\ 20 \\ \end{array} \right) $ Achtung!-> Sehr gewöhnungsbedürftig ist, dass die Multiplikation und Division, als auch die Potenz und Wurzel von Arrays und Matrizen möglich ist MultiplikationHinweis: Nicht zu verwechseln mit dem Skalarprodukt!$\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right) \cdot \left( \begin{array}{ccc} 2 \\ 4 \\ 6 \\ 8 \\ 10 \\ \end{array} \right) = \left( \begin{array}{ccc} 2 \\ 8 \\ 18 \\ 32 \\ 50 \\ \end{array} \right) $ Division$\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right) / \left( \begin{array}{ccc} 2 \\ 4 \\ 6 \\ 8 \\ 10 \\ \end{array} \right) = \left( \begin{array}{ccc} 0.5 \\ 0.5 \\ 0.5 \\ 0.5 \\ 0.5 \\ \end{array} \right) $ Potenz$\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right) ^2\ = \left( \begin{array}{ccc} 1 \\ 4 \\ 9 \\ 16 \\ 25 \\ \end{array} \right)$ Hinweis: Die Verwendung der `pow()`-Funktion aus dem `math`-Modul führt zu einer Fehlermeldung. Wurzel$\sqrt{\left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ 4 \\ 5 \\ \end{array} \right)} = \left( \begin{array}{ccc} 1.000 \\ 1.414 \\ 1.732 \\ 2.000 \\ 2.236 \\ \end{array} \right)$ Hinweis: Die Verwendung der `sqrt()`-Funktion aus dem `math`-Modul führt zu einer Fehlermeldung. Vektoren- und Matrizenberechnungen Skalarprodukt (auch Innere Produkt)$a\cdot b = \left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ \end{array} \right) \cdot \left( \begin{array}{ccc} 0 \\ 1 \\ 0 \\ \end{array} \right) = 2 $
###Code
a = np.array([1,2,3])
b = np.array([0,1,0])
###Output
_____no_output_____
###Markdown
Matrix product
###Code
a = np.array([[1,2],[3,4]])
b = np.array([[11,12],[13,14]])
A = np.array([[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34]])
B = np.array([[5, 4, 2], [1, 0, 2], [3, 8, 2], [24, 12, 57]])
# print(np.inner(A, B)) # raises an error
###Output
[[ 442 316 870]
[ 772 556 1500]
[1102 796 2130]]
[[ 442 316 870]
[ 772 556 1500]
[1102 796 2130]]
###Markdown
Cross product$a\times b = \left( \begin{array}{ccc} 1 \\ 2 \\ 3 \\ \end{array} \right) \times \left( \begin{array}{ccc} 4 \\ 5 \\ 6 \\ \end{array} \right) = \left( \begin{array}{ccc} -3 \\ 6 \\ -3 \\ \end{array} \right) $
###Code
x = np.array([1, 2, 3])
y = np.array([4, 5, 6])
###Output
_____no_output_____
###Markdown
NumpyNumpy is a convenient, Pythonic _toolkit_ for manipulating raw memory. It's primarily intended for data analysis applications:
###Code
import numpy
array = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
array[5:7]
array[array > 3]
###Output
_____no_output_____
###Markdown
But it also lets you do bare metal stuff, like byte-swapping and casting.
###Code
array.byteswap()
array.view(dtype="int32")
###Output
_____no_output_____
###Markdown
A Numpy array object (`ndarray`) is essentially just a C array with a Python object attached. The Python object manages everything that is ordinarily unsafe about C arrays: * the length (to prevent overwrites) * the type (to prevent unintended casting), including unsigned integers * the byte order (ditto) * C vs Fortran order for multidimensional arrays (e.g. which index runs contiguously in memory: the first or last?) * whether this object owns the array or if it is just a "view". Usually, when you create a new Numpy array (sometimes implicitly in an expression involving arrays), you want Numpy to allocate a new memory buffer and let the `ndarray` object own it. That is, when the `ndarray` is deleted, the buffer gets freed.For performance, some Numpy operations give you a "view" into another array, rather than a copy:
###Code
subarray = array[5:]
subarray
subarray[2] = 999.99
array
###Output
_____no_output_____
###Markdown
You can identify a "view" because it has a "base" reference to the array that it's viewing. By maintaining a reference, the view can ensure that the base doesn't get garbage collected until they're both out of scope.
###Code
subarray.base is array
array.base is None
###Output
_____no_output_____
###Markdown
But there's yet another case: sometimes you have a buffer already and want Numpy to wrap it. Maybe you want to use some of Numpy's vectorized functions on the data, or maybe you want to pass it to some software that only recognizes data in Numpy format (`` machine learning ``).Anything that satisfies Python's "buffer" interface can become an `ndarray`.
###Code
string = "hello there"
array = numpy.frombuffer(string, dtype=numpy.uint8)
array
map(chr, array)
array.base is string
###Output
_____no_output_____
###Markdown
With some effort, Numpy arrays can even wrap arbitrary regions of memory, given by an integer-valued pointer.
###Code
import ctypes
libc = ctypes.cdll.LoadLibrary("libc.so.6")
libc.malloc.restype = ctypes.POINTER(ctypes.c_double)
ptr = libc.malloc(4096)
ptr
ptr.__array_interface__ = {
"version": 3,
"typestr": numpy.ctypeslib._dtype(type(ptr.contents)).str,
"data": (ctypes.addressof(ptr.contents), False),
"shape": (4096,)
}
array = numpy.array(ptr, copy=False)
array
###Output
_____no_output_____
###Markdown
Snake eating its tail againHave you ever wondered what Python structs look like? You don't have to use the C API to delve into this. The `id(obj)` for some `obj` happens to be a numerical pointer to the object in memory. This fact is not guaranteed in future versions of Python (nor is it true in alternate implementations, such as Jython), but it's true for now.
###Code
string = "hello there"
id(string)
ptr = ctypes.cast(id(string), ctypes.POINTER(ctypes.c_uint8))
ptr.__array_interface__ = {
"version": 3,
"typestr": numpy.ctypeslib._dtype(type(ptr.contents)).str,
"data": (ctypes.addressof(ptr.contents), False),
"shape": (64,)
}
array = numpy.array(ptr, copy=False)
print map(chr, array)
###Output
_____no_output_____
###Markdown
Numpyhttp://www.numpy.org/NumPy is the fundamental package for scientific computing with Python. It contains:- a powerful N-dimensional array object- sophisticated (broadcasting) functions- tools for integrating C/C++ and Fortran code- useful linear algebra, Fourier transform, and random number capabilities- besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data types can be defined, which lets NumPy integrate seamlessly and quickly with a wide variety of databases.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
1. Creating an ndarray
###Code
a = [1,2,3]
a1 = np.array(a)
display(a1)
###Output
_____no_output_____
###Markdown
1. Using np.array() > Note:- by default all elements of a numpy ndarray have the same type- if the input list contains mixed types, they are unified to a single type, with priority str > float > int
###Code
b = [1,1.0,'1']
b1= np.array(b)
display(b1)
###Output
_____no_output_____
###Markdown
2. Two-dimensional arrays
###Code
c = [[1,2],[3,4]]
c1 = np.array(c)
display(c1)
np.matrix(c)
###Output
_____no_output_____
###Markdown
2. ndarray attributes. Four attributes to remember: ndim: number of dimensions; shape: the shape (length of each dimension); size: total number of elements; dtype: the element type
###Code
c1
c1.ndim
c1.shape
c1.size
c1.dtype
###Output
_____no_output_____
###Markdown
Others- array.flatten() flattens to one dimension- np.ravel() flattens to one dimension- np.save('p.npy', p) saves to a file- np.load('p.npy') loads from a file
###Code
c1
c1.flatten()
c1.ravel()
c1
np.save('/Users/huwang/Desktop/p.npy',c1)
np.load('/Users/huwang/Desktop/p.npy')
###Output
_____no_output_____
###Markdown
Choosing the axis > - axis indicates the direction of the dimension: axis=1 operates across columns (i.e. along the horizontal axis) and axis=0 operates across rows (i.e. along the vertical axis)- with more than 3 dimensions there is no row/column notion and axis simply refers to the corresponding dimension. 3. Aggregation operations on an ndarray

| Function Name | NaN-safe Version | Description |
|---|---|---|
| np.sum | np.nansum | Compute sum of elements |
| np.prod | np.nanprod | Compute product of elements |
| np.mean | np.nanmean | Compute mean of elements |
| np.std | np.nanstd | Compute standard deviation |
| np.var | np.nanvar | Compute variance |
| np.min | np.nanmin | Find minimum value |
| np.max | np.nanmax | Find maximum value |
| np.argmin | np.nanargmin | Find index of minimum value |
| np.argmax | np.nanargmax | Find index of maximum value |
| np.median | np.nanmedian | Compute median of elements |
| np.percentile | np.nanpercentile | Compute rank-based statistics of elements |
| np.any | N/A | Evaluate whether any elements are true |
| np.all | N/A | Evaluate whether all elements are true |
| np.power | | Exponentiation |
###Code
c1
c1.sum(axis=1)
c1.sum(axis=0)
c1.sum()
###Output
_____no_output_____
###Markdown
The difference between np.sum and np.nansum: nan means "not a number". 4. Creation functions 1) np.ones(shape, dtype=None, order='C')
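A minimal sketch of the difference (the array `a` here is only illustrative):
```python
import numpy as np
a = np.array([1.0, np.nan, 3.0])
print(np.sum(a))     # nan -> NaN propagates
print(np.nansum(a))  # 4.0 -> NaN is ignored
```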
###Code
np.ones(shape=(2,2),dtype='float')
###Output
_____no_output_____
###Markdown
2) np.zeros(shape, dtype=float, order='C')
###Code
np.zeros(shape=(3,3,3))
###Output
_____no_output_____
###Markdown
3) np.full(shape, fill_value, dtype=None, order='C')
###Code
np.full(shape=(3,3,2),fill_value=100)
###Output
_____no_output_____
###Markdown
4)np.eye(N, M=None, k=0, dtype=float) 单位矩阵 N行M列 K:偏移量
###Code
np.eye(10,10,k=0)
np.eye(10,10,k=2)
###Output
_____no_output_____
###Markdown
5) np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)- endpoint 是否包括stop- retstep 是否显示步长
###Code
np.linspace(0,10,num=5,retstep=True,endpoint=False)
###Output
_____no_output_____
###Markdown
6) np.arange([start, ]stop, [step, ]dtype=None)
###Code
np.arange(0,10,step=2)
np.arange(10,step=2)
np.arange(10)
###Output
_____no_output_____
###Markdown
7) np.random.randint(low, high=None, size=None, dtype='l')- np.random为Numpy中的随机类- randint 为整数型- 'l' 代表int
###Code
np.random.randint(0,10,size=(2,2))
###Output
_____no_output_____
###Markdown
8) numpy.random.randn(d0, d1, …, dn)是从标准正态分布中返回一个或多个样本值。numpy.random.rand(d0, d1, …, dn)的随机样本位于[0, 1)中d_[]为维度
###Code
np.random.randn(1,2,3,4,5,6)
###Output
_____no_output_____
###Markdown
9)标准正太分布 np.random.normal(loc=0.0, scale=1.0, size=None)- 可以控制期望值和方差变化- loc 期望值 默认0- scale 方差 默认值1
###Code
np.random.rand(2,2)
###Output
_____no_output_____
###Markdown
10)np.random.random(size=None)生成0到1的随机数,左闭右开
###Code
np.random.random(size=(2,2))
###Output
_____no_output_____
###Markdown
练习一- 生成一个二维的数组并自选取聚合操作- 随机生成三维RGB数组> 提示: - 每个元素大小在大小在0-1之间,或者0-255之间( png 0-1, jpg 0-255 ) - 使用: - import matplotlib.pyplot as plt - %matplotlib inline - plt.imshow(array) 展示
###Code
import matplotlib.pyplot as plt
%matplotlib inline
data = np.random.random(size=(50,50,3))
# display(data)
plt.imshow(data)
plt.imshow(data)
###Output
_____no_output_____
###Markdown
五 ndarray的基本操作 1. 索引一维与列表完全一致 多维时同理 一维数组索引
###Code
g = np.arange(1,10)
display(g)
###Output
_____no_output_____
###Markdown
A、正序
###Code
g[0]
###Output
_____no_output_____
###Markdown
B、逆序 == 总长度 - 正序值比如 -1 == 总长度 - 1
###Code
g[-1]
###Output
_____no_output_____
###Markdown
c、由索引获取相应的值
###Code
g[4]
g[-5]
###Output
_____no_output_____
###Markdown
多维数组索引
###Code
g1 = np.random.randint(0,10,size=(2,2,2))
display(g1)
###Output
_____no_output_____
###Markdown
A、获取某一维度
###Code
g1[0][0][0]
g1[0][1][1]
###Output
_____no_output_____
###Markdown
B、获取某一维度下的元素
###Code
g1[0,1,1]
# g1[0][1][1][3]
###Output
_____no_output_____
###Markdown
2.索引切片 一维切片
###Code
g3 = np.arange(0,10)
display(g3)
###Output
_____no_output_____
###Markdown
A、正向切片
###Code
g3[2:5]
###Output
_____no_output_____
###Markdown
B、逆向切片
###Code
g3[-6:-3]
###Output
_____no_output_____
###Markdown
C、步长切片array[start:end:step]
###Code
g3[0:-1:2]
###Output
_____no_output_____
###Markdown
D、逆转切片
###Code
g3[::-1]
###Output
_____no_output_____
###Markdown
多维数组切片> 降维式切
###Code
g4 = np.random.randint(0,100,size=(3,3,3))
display(g4)
g4[1,0:3,0:2]
g4[1,:,0:2]
g4[:,:,1:3]
###Output
_____no_output_____
###Markdown
A、正向切片
###Code
g4[0][0][1:3]
g4[0,0,1:3]
g4[0,0:2,0]
g4[1,1:3,1:3]
###Output
_____no_output_____
###Markdown
B、逆向切片
###Code
g4
g4[-2,:,0]
g4[-1,0,:]
g4[1,1,:]
###Output
_____no_output_____
###Markdown
C、步长切片 array[start:end:step] D、逆转切片
###Code
g4[1,1,::-1]
g4
g4[0,::-1,1]
g4[0,::-1]
g4[0,:,::-1]
g4[1,:,::-1][:,0:2]
###Output
_____no_output_____
###Markdown
E、完全逆转
###Code
g4
g4[::-1,::-1,::-1]
###Output
_____no_output_____
###Markdown
有趣应用
###Code
from PIL import Image
img = Image.open('girl.jpg','r')
img.show()
import matplotlib.pyplot as plt
%matplotlib inline
image = plt.imread('/Users/huwang/Desktop/timg.jpeg')
display(image)
display(image.shape) # R red G green B blue
plt.imshow(image)
image2 = image.copy()
# display(image2)
plt.imshow(image2[:,:,::-1])
image3 = image2[400:600,500:800,:]
display(image3)
plt.imshow(image3)
image4=image2[100:500,100:400,:]
display(image4)
plt.imshow(image4)
image2[400:600,500:800,:] = image2[100:300,100:300,:]
plt.imshow(image2)
###Output
_____no_output_____
###Markdown
Explanation of the image format- j = np.random.randint(0,10,size=(2,4,3))- display(j): the innermost values are the three RGB color channels- display(j[0].shape)- display(j[1].shape)- display(j[:,:,0]) gets the R channel- display(j[:,:,2].shape) to play with the RGB channels- note that the original (read-only) image array cannot be modified directly: girl[:,:,-1] = 0
###Code
image2 = image.copy()
image2[:,:,1] = 180
image2[:,:,2] = 88
display(image2)
plt.imshow(image2)
plt.imshow(image)
image7 = image.copy()
display(image7)
first = image7[:,:,0]
second = image7[:,:,1]
display(first,second)
display(first.shape)
sum_=first + second
display(sum_//2)
image7[:,:,0] = sum_ //2
plt.imshow(image7)
a = [[1,2],[3,4]]
b = [[5,6],[7,8]]
a1 = np.array(a)
b1 = np.array(b)
display(a1,b1)
display(a1 + b1)
###Output
_____no_output_____
###Markdown
完全更改图片,添加完全噪声 练习二> 完成RGB三色全逆转,并显示 3、array变形- reshape 转换形状的时候要注意维度是否合适
###Code
h = np.arange(0,100,5)
display(h)
h.reshape((1,2,10))
h.reshape(-1,1)
###Output
_____no_output_____
###Markdown
4. Concatenation- np.concatenate(). Points to note when concatenating:- the arrays to concatenate are passed as a sequence: be sure to wrap them in square brackets or parentheses- the number of dimensions must be the same- the shapes must be compatible. [Important] By default, concatenation happens along the dimension given by the first value of the shape tuple; note the difference between 1-D and higher-dimensional arrays- the concatenation direction can be changed with the axis parameter
###Code
k = np.ones(shape=(3,4))
l = np.full(shape=(3,5),fill_value= 6)
display(k, l)
np.concatenate((k,l))
np.concatenate((k,l),axis = 1)
###Output
_____no_output_____
###Markdown
5np.hstack 水平级联np.vstack 垂直级联
###Code
k = np.ones(shape=(3,3))
l = np.full(shape=(3,3),fill_value= 7)
display(k, l)
np.hstack((k,l))
np.vstack((k,l))
###Output
_____no_output_____
###Markdown
6. Copies. Assignment operations never create a copy of any of an ndarray's elements, so operations on the assigned name also affect the original object. Use the copy() function to create a real copy. 6. Numpy arrays and matrices 1. Array/matrix addition
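A small sketch of the difference between an alias and a copy (the variable names here are only illustrative):
```python
import numpy as np
orig = np.arange(5)
alias = orig        # no copy: both names refer to the same buffer
dup = orig.copy()   # an independent copy
alias[0] = 99
print(orig)         # [99  1  2  3  4] -> changed through the alias
print(dup)          # [0 1 2 3 4]      -> unaffected
```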
###Code
m = np.random.randint(0,10,size=(3,3),dtype=np.int)
n = np.random.randint(0,10,size=(3,3),dtype=np.int)
display(m,n)
m + n
###Output
_____no_output_____
###Markdown
2. Array/matrix subtraction
###Code
m - n
###Output
_____no_output_____
###Markdown
3. Element-wise (pseudo) multiplication
###Code
display(m,n)
m * n
###Output
_____no_output_____
###Markdown
4. Matrix multiplication
###Code
np.dot(m,n)
###Output
_____no_output_____
###Markdown
5. Matrix inverse
###Code
np.mat(m).I
np.dot(np.matrix(m),np.matrix(m).I)
###Output
_____no_output_____
###Markdown
6. Matrix transpose
###Code
np.mat(m).T
###Output
_____no_output_____
###Markdown
7. Conjugate transpose
###Code
np.mat(m).H
###Output
_____no_output_____
###Markdown
7. Numpy broadcasting>- Important: the two rules of ndarray broadcasting- Rule 1: pad the missing dimensions with 1- Rule 2: assume the missing elements are filled with the existing values- Example 1: m = np.ones((2, 3)), a = np.arange(3); compute m + a
###Code
m = np.ones((2, 3))
a = np.arange(3)
display(m,a)
display(m + a)
###Output
_____no_output_____
###Markdown
8. Sorting arrays >- 1. Quick sort- both np.sort() and ndarray.sort() work, but they differ:- np.sort() does not modify its input- ndarray.sort() sorts in place, using no extra space, but it modifies its input
###Code
test = np.random.randint(0,10,size=(2,3))
display(test)
test.sort(axis=0)
test
test2 = np.random.randint(0,10,size=(2,3))
display(test2)
display(np.sort(test2,axis=0))
###Output
_____no_output_____
###Markdown
>- 2. Partial sorting - np.partition(a, k) - sometimes we are not interested in all of the data, but only in the smallest or largest part - when k is positive, we want the k smallest values - when k is negative, we want the k largest values
###Code
arr = np.random.randint(0,100,size=10)
arr
np.partition(arr,-2)
###Output
_____no_output_____
###Markdown
###Code
import numpy
import numpy as np
a=np.array([2,7,1,99,4])
type(a)
b=np.array([12,17,11,199,14])
a
b
a[0]
b[0]
a+b
a+2
a*2
a**2
x=np.array([[2,5,8],[3,6,1]])
x[0][0]
y=x+7
z=np.array([[2,7],[3,8],[2,9],[9,4]])
z
z.shape
z[0:2]
z[0:]
z[0:,0]
z[0:2,0]
z[0:,1]
z[0:,0:2]
z[0:,[0,1]]
z[[0,2]]
z[[0,3],1]
b=np.array([[1,9,7],[3,2,8],[4,5,6],[2,9,0]])
x1=x.reshape(3,2)
np.zeros((3,6))
np.ones((3,4),dtype=np.int32)
np.full((4,5),9)
###Output
_____no_output_____
###Markdown
Initializing Different types of arrays
###Code
## Initializing Different types of arrays
### np.zeros(shape, type)
zeros = np.zeros([2, 4, 6])
zeros
print(zeros.dtype)
### np.ones(shape, type)
ones = np.ones((2, 3,5), dtype=np.uint32)
ones
### any number 255
whiteImage = np.full((4,4), 255, dtype='uint32')
whiteImage
### full_like
# Let's say we want to generate an array of 100s that has the same shape as the whiteImage array
array100s = np.full_like(whiteImage, 100)
array100s
# generating an array of decimals
randomFloat = np.random.rand(2,3,6)
randomFloat
# Generating an array of integers
randomInt = np.random.randint(-7, 15, size=(7, 9,7))
randomInt
# np.identity(number, dtype)
# Generates the identity matrix
identity = np.identity(4, dtype='int8')
identity
## Generating element's using the np.arange(15).reshape(3, 5) methods
arr = np.arange(15).reshape(3,5)
arr
# Repeating an array
arr1 = np.array([[2,4,6]])
arr2 = np.repeat(arr1, 3, axis=0) # repeates vertically
arr2
arr3 = np.repeat(arr1, 3, axis=1) # Repeats horizontally
arr3
###Output
_____no_output_____
###Markdown
Universal Functions
###Code
# the arange(n) function that generates 1-d array of integers from 0 to n-1
a = np.arange(10)
a # array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# the all function
print(np.all(np.array([True, False, True, True, False]))) # False
print(np.all(np.array([True, True]))) # True
print(np.all(np.array([1,2, 3,-10]))) # True
# the Any function
print(np.any(np.array([True, False, True, True, False]))) # True
print(np.any(np.array([not True, not True]))) # False
print(np.any(np.array([1,2, 3,-10]))) # True
# argmax function
print(np.argmax(np.array([1,10,-19,9, 0,56,108,-76]))) # 6
# argmin
print(np.argmin(np.array([1,10,-19,9, 0,56,108,-76]))) # 7
# argsort function
print(np.argsort(np.array([1,10,-19,9, 0,56,108,-76]))) # [7 2 4 0 3 1 5 6]
# average function
print(np.average(np.array([1,10,-19,9, 0,56,108,-76]))) # 11.125
# bincount
print(np.bincount(np.array([1,1,2,6,7,0]))) # [1 2 1 0 0 0 1 1]
# ciel function
print(np.ceil(np.array([1.2, 2.8, 6.9, -6,10,9]))) # [ 2. 3. 7. -6. 10. 9.]
# floor function
print(np.floor(np.array([1.2, 2.8, 6.9, -6,10,9]))) # [ 1. 2. 6. -6. 10. 9.]
# clip function
print(np.clip(np.arange(10), 2, 8)) # [2 2 2 3 4 5 6 7 8 8]
# cumprod function
print(np.cumprod(np.array([2,3,5]))) # [ 2 6 30]
# cumsum function
print(np.cumsum(np.array([2,3,5]))) # [ 2 5 10]
# max function
print(np.max(np.random.randint(1, 100, 5))) # a random numbaer
# min function
print(np.min(np.random.randint(1, 100, 5))) # a random numbaer
# maximum function
print (np.maximum([2, 3, 4], [1, 5, 2])) # [2 5 4]
# minimum function
print (np.minimum([2, 3, 4], [1, 5, 2])) # [1 3 2]
# mean
print(np.mean(np.ones((5,)))) # 1.0
# Median
print(np.median(np.array([2, 5, -6, 8, 9]))) # 5
# nonzero function
print(np.nonzero(np.round(np.array(np.random.rand(10))))) # (array([2, 3, 4, 6, 7, 8], dtype=int64),)
# prod function
print(np.prod(np.array([2,2,2,3]))) # 24
# round function
np.round(np.array(np.random.rand(10))) # a random array
# sort function
np.sort(np.array([1, 2,10, 0,7,-6])) # array([-6, 0, 1, 2, 7, 10])
# std function [standard deviation]
np.std(np.array([5, 9, 9, 0])) # 3.6996621467371855
# sum funstcion
np.sum(np.ones(5)) # 5
# var function the varience of the array
np.var(np.array([5, 9, 9, 0])) # 13.6875
###Output
_____no_output_____
###Markdown
Copying arrays
###Code
## Copying arrays
array1 = np.array([1,2,3,4,5])
array2 = array1
print(array2) # [1 2 3 4 5]
array2[-1]= 500
print(array2) # [1 2 3 4 500]
# Array1 also changes
print(array1) # [1 2 3 4 500]
##-------------------------
array1 = np.array([1,2,3,4,5])
array2 = array1.copy()
print(array2) # [1 2 3 4 5]
array2[-1]= 500
print(array2) # [1 2 3 4 500]
# Array1 also changes
print(array1) # [1 2 3 4 5]
a = np.array([2,3,4,5,9,90])
### Trigonometry
print(np.sin(a))
print(np.cos(a))
print(np.tan(a))
# There are more
### Maths on two arrays
b = np.array([1,2,3,4,5,6])
print(a * b)
print(a ** b)
print(a / b)
print(a - b)
print(a + b)
# There are more
### Math on one array
print(a * b)
print(a ** b)
print(a / b)
print(a - b)
print(a + b)
# There are more
# Linear Algebra
a = np.ones([2, 2], dtype='int32')
b = np.full([2,2], 3, dtype= 'int32')
print(a * b)
# The correct way
print(np.matmul(a, b))
## Getting the determinant of b
print(np.linalg.det(b))
# Statistics with numpy
arr = np.array([[2, 3, 4, 5,-1,9],[100, 19, 100, 78,10,-90]])
print(np.max(arr))
print(np.min(arr))
print(np.sum(arr))
# Reshaping arrays
arr = np.array([[2, 3, 4,5],[100, 19, 100, 7]])
print(arr.shape) # (2,4)
arr2 = arr.reshape([1, 8])
print(arr2.shape) # (1, 8)
## Stacking arrays
## Vertical stack of arrays or matrixes
arr1 = np.array([2,3,4,5,8])
arr2 = np.array([1,2,3,4,5])
print(np.vstack([arr1, arr2]))
# Horizontal stacking of arrays or matrixes
arr1 = np.array([2,3,4,5,8])
arr2 = np.array([1,2,3,4,5])
print(np.hstack([arr1, arr2]))
# Generating arrays from a file
## Suppose we have a file that has list of numbers seperated by a comma and we want to read this file and into a numpy array
# with default dtype
data = np.genfromtxt('data.txt', delimiter=',')
data
# As integer datatype
data = np.genfromtxt('data.txt', delimiter=',', dtype='int32')
data
# Or
data.astype('int8')
# Advanced indexing and Boolean Masking
data = np.genfromtxt('data.txt', delimiter=',', dtype='int32')
data[data%2==1]
#Indexing return elements that are at index, `1, 2, 6, 9` and the last element in the array `data` where elements are `ODD`
data = np.genfromtxt('data.txt', delimiter=',', dtype='int32')
data[data%2==1][[1, 2, 6, 9, -1]]
###Output
_____no_output_____
###Markdown
Transpose `np.array`* This means we are just flipping the axis
###Code
A = np.array([
[2, 3, 4.],
[4, 5, 9]
])
A
B = np.transpose(A)
B
###Output
_____no_output_____
###Markdown
Trace
###Code
C = np.trace(A)
C
###Output
_____no_output_____
###Markdown
`eye`* Create a matrix of shape n with `1s` on the diagonal and `0s` otherwise.
###Code
a = np.eye(3)
a
one_hot_at_one = np.eye(10)[1]
one_hot_at_one
###Output
_____no_output_____
###Markdown
Broadcasting* Here, we’re adding a vector with a scalar. Their dimensions aren’t compatible as is but how does NumPy still gives us the right result? This is where broadcasting comes in. The scalar is broadcast across the vector so that they have compatible shapes.
###Code
A = np.array(9)
B = np.array([7, 8, 9])
A+B
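# another broadcasting sketch: a (3,1) column against a (1,3) row yields a (3,3) grid
col = np.arange(3).reshape(3, 1)
row = np.arange(3).reshape(1, 3)
col + row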
###Output
_____no_output_____
###Markdown
Test1. one2. two TEST- one- two* three* four
###Code
import numpy as np
x = np.array([5, 8,
9, 10,
11]) # using 'array' method
type(x) # Displays type of array 'x'
x
y = np.array([[6, 9, 5],
[10, 82, 34]])
print(y)
print(y.ndim, y.shape, y.size, y.dtype, y.itemsize, y.nbytes)
z = np.array([[6, 9, 5],
[10, 82, 34]],
dtype='float64')
print(z)
print(z.dtype)
n = [5, 10, 15, 20, 25]
array_x=np.array(n)
print(type(array_x))
print(array_x.dtype)
print(array_x.ndim,array_x.shape,array_x.size)
n2 = [[-1, -2, -3, -4], [-2,-4, -6, -8]]
array_y=np.array(n2)
print(type(array_x))
print(array_x.dtype)
print(array_y.ndim,array_y.shape,array_y.size,array_y.dtype,array_y.itemsize)
a = [[[4.1, 2.5], [1.1, 2.3], [9.1, 2.5]],
[[8.6, 9.9],[3.6, 4.3], [6.6, 0.3]]]
xx = np.array(a, dtype='float64')
type(xx), xx.ndim, xx.shape
xxx = np.zeros(shape=(2,4))
print(xxx)
yy = np.full(shape=(2,3), fill_value=10.5)
print(yy)
b=np.arange(3, 15, 2.5) # 2.5 is step
print(b)
c = np.linspace(3, 15, 5) # 5 is size of array 'y'
print(c)
np.random.seed(100) # setting seed
d = np.random.rand(2) # 2 random numbers between 0 and 1
print(d)
np.random.seed(100) # setting seed
e = np.random.randint(10, 50, 3) # 3 random integers between 10 and 50
print(e)
np.random.seed(100)
f = np.random.randn(3) # Standard normal distribution
print(f)
np.random.seed(100)
g = 10 + 2.5*np.random.randn(3) # normal distribution with mean 10 and sd 2
print(g)
from io import StringIO
stream = StringIO('''88.25 93.45 72.60 90.90
72.3 78.85 92.15 65.75
90.5 92.45 89.25 94.50
''')
data = np.loadtxt(stream,delimiter=' ')
print(data)
print(data.ndim, data.shape)
print(np.array(([1, 2], (3,4))).shape)
print(np.eye(2))
x1=np.array([[[-1,1],[-2,2]],[[-3, 3], [-4, 4]]])
print(x1.ndim,x1.shape,x1.size)
x2 = np.full(shape=(3,2,2), fill_value=1)
x3 = np.eye(4)
print(x2)
print(x3)
np.random.seed(100)
x4 = np.random.rand(24).reshape((3, 4, 2))
print(x4)
x5 = 5+2.5*np.random.randn(20)
print(x5)
x6=np.arange(0,40,2)
print(x6)
x7=np.linspace(10,20,30)
print(x7)
np.random.seed(100)
x8 = np.random.randint(10, 100, 8)
print(x8, end='\n\n')
y2 = x8.reshape(2,4)
print(y2, end='\n\n')
z2 = x8.reshape(2,2,2)
print(z2, '\n\n')
x9= np.array([[-1, 1], [-3, 3]])
y3 = np.array([[-2, 2], [-4, 4]])
np.vstack((x9,y3))
x10 = np.array([[-1, 1], [-3, 3]])
y4 = np.array([[-2, 2], [-4, 4]])
z3 = np.array([[-5, 5], [-6, 6]])
np.hstack((x10,y4,z3))
x = np.arange(30).reshape(6, 5)
print(x,end='\n\n')
res = np.vsplit(x, 2)
print(res[0], end='\n\n')
print(res[1])
x = np.arange(30).reshape(6, 5)
print(x,end='\n\n')
res = np.vsplit(x, (2, 5))
print(res[0], end='\n\n')
print(res[1], end='\n\n')
print(res[2])
x = np.arange(10).reshape(2, 5)
print(x,end='\n\n')
res = np.hsplit(x, (2,4))
print(res[0], end='\n\n')
print(res[1], end='\n\n')
print(res[2])
x = np.arange(6).reshape(2,3)
print(x)
y = np.hsplit(x,(2,))
print(y[0])
x=np.arange(20)
print(x.shape)
y=x.reshape((2, 10))
print(x,end='\n\n')
print(y,end='\n\n')
np.hsplit(y,2)
z=x.reshape((4, 5))
print(z)
np.vsplit(z,2)
x=np.arange(3,15,3)
print(x)
p=x.reshape(2,2)
print(p)
x=np.arange(15,33,3)
print(x)
q=x.reshape(2,3)
print(q)
np.hstack((p,q))
x = np.arange(6).reshape(2,3)
print(x, end='\n\n')
print(x + 10, end='\n\n')
print(x * 3, end='\n\n')
print(x % 2)
x = np.array([[-1, 1], [-2, 2]])
y = np.array([[4, -4], [5, -5]])
print(x + y, end='\n\n')
print(x * y)
x = np.array([[-1, 1], [-2, 2]])
y = np.array([-10, 10])
print(x * y)
x = np.array([[0,1], [2,3]])
print(np.square(x), end='\n\n')
print(np.sin(x))
x = np.array([[0,1], [2, 3]])
print(x.sum(), end='\n\n')
print(x.sum(axis=0), end='\n\n')
print(x.sum(axis=1))
x = np.arange(4)
y = np.arange(4)
print(x == y)
print(np.repeat(3, 4))
x = np.array([[-2],
[2]])
y = np.array([[-3, 3]])
print(x.dot(y))
x = np.arange(30).reshape(5,6)
print(x.argmax(axis=1))
x = np.arange(4).reshape(2,2)
print(np.isfinite(x))
y=np.arange(6).reshape((2, 3))
print(y)
np.square(y)+5
x=np.random.randint(-30,30,size=(5, 6))
print(x)
print(np.sum(x,axis=0))
print(np.sum(x,axis=1))
x=10+2*np.random.randn(50)
print(x)
print(np.mean(x),np.std(x),np.var(x))
x = np.array([5, 10, 15, 20, 25, 30, 35])
print(x[1]) # Indexing
print(x[1:6]) # Slicing
print(x[1:6:3]) # Slicing
y = np.array([[0, 1, 2],
[3, 4, 5]])
print(y[1:2, 1:3])
print(y[1])
print(y[:, 1])
z = np.array([[[-1, 1], [-2, 2]],
[[-4, 4], [-5, 5]],
[[-7, 7], [-9, 9]]])
print(z[1,:,1])
print(z[1:,1,:])
print(z[2])
x = np.array([[-1, 1], [-2, 2]])
for row in x:
print('Row :',row)
x = np.array([[0,1], [2, 3]])
for a in np.nditer(x):
print(a)
x = np.arange(10).reshape(2,5)
condition = x % 2 == 0
print(condition)
print(x[condition])
x = np.arange(12).reshape(3,4)
print(x[-1:,].shape)
print(x[-2])
x = np.array([[1, 2], [3, 4], [5, 6]])
print(x[[0, 1, 2], [0, 1, 1]])
x = np.arange(30).reshape(3,5,2)
print(x)
print(x[1,::2,1])
x = np.array([[0, 1], [1, 1], [2, 2]])
y = x.sum(-1)
print(x[y < 2, :])
x = np.arange(30).reshape(6, 5)
print(x)
print(x[-1])
x=np.arange(30).reshape(2, 3, 5)
print(x)
b=np.array([True,False])
b.shape
x[b]
x[b,:,1:3]
x[b,2,1:3]
x = np.array([[-1,0,1], [-2, 0, 2]])
y = np.zeros_like(x)
print(y)
x = np.arange(30).reshape(3,5,2)
print(x[1,::2,1])
x = np.arange(4)
print(x.flatten())
x = np.arange(6).reshape(2,3)
y = np.hsplit(x,(2,))
print(y[0])
x = np.arange(12).reshape(3,4)
print(x[:,1])
x = np.array([[-2],
[2]])
y = np.array([[-3, 3]])
print(x + y)
x = np.array([[3.2, 7.8, 9.2],
[4.5, 9.1, 1.2]], dtype='int64')
print(x.itemsize)
x = np.arange(30).reshape(3,5,2)
print(x[-1, 2:-1, -1])
###Output
[25 27]
###Markdown
###Code
import numpy as np
np.array([10, 20, 24, 5, 15, 50])
a = np.array([10, 30, 20, 4 ,30, 51, 7, 2, 4, 40, 100])
a[4]
a[3:]
a[3:7]
a[1::4]
np.zeros(5)
ones = np.ones((4, 5))
ones
np.ones(4)
np.zeros((2,3))
type(np.ones)
type(np.ones((4, 5)))
type(ones[3][1])
np.linspace(3, 10, 5)
b = np.array([['x', 'y', 'z'],['a','c','e']])
print(b)
type(b)
b.ndim # number of dimensions
c = [12, 4 ,10, 40, 2]
np.sort(c)
cabeceras = [('nombre', 'S10'), ('edad', int)]
datos = [('Juan', 10), ('Maria', 70), ('Javier', 42), ('Samuel', 15)]
usuarios = np.array(datos, dtype = cabeceras)
np.sort(usuarios, order = 'edad')
np.arange(25)
np.arange(5, 30)
np.arange(5, 50, 5)
np.full((3, 5), 10)
np.diag([0,3,9,10])
cabeceras = [('nombre', 'S10'), ('edad', int), ('pais', 'S10')]
datos = [('Juan', 10, 'Chile'), ('Maria', 70, 'Colombia'), ('Javier', 42, 'Brasil'), ('Samuel', 15, 'Argentina')]
usuarios = np.array(datos, dtype = cabeceras)
np.sort(usuarios, order = 'edad')
np.sort(usuarios, order = 'pais')
np.sort(usuarios, order = 'nombre')
###Output
_____no_output_____
###Markdown
###Code
my_list = [1,2,3]
import numpy as np
arr = np.array(my_list)
arr
my_mat = [[1,2,3],[4,5,6],[7,8,9]] #List of Lists
my_mat
np.array(my_mat)
###Output
_____no_output_____
###Markdown
Two square brackets show that this is a 2-d array. Python/NumPy built-in methods to make arrays **1. np.arange(s,e,w)**:- s (start), e (end), w (step size). arange is a widely used function to quickly create an array. Passing a value of 20 to the arange function creates an array with values ranging from 0 to 19.
###Code
np.arange(10)
###Output
_____no_output_____
###Markdown
If we only use the arange function, it will output a one-dimensional array. To make it a two-dimensional array, chain its output with the reshape function.
###Code
ar = np.arange(20).reshape(4,5)
ar
np.arange(0,10)
np.arange(0,10,2)
###Output
_____no_output_____
###Markdown
**2. All zeros array**
###Code
np.zeros(3)
np.zeros((2,3))
###Output
_____no_output_____
###Markdown
**3. All ones Array**
###Code
np.ones(4)
np.ones((3,4))
###Output
_____no_output_____
###Markdown
**Linspace** :- take start and end the third argument is no. of evenly spaced points we want between those points
###Code
np.linspace(0,5,20)
###Output
_____no_output_____
###Markdown
**4. Identity matrix**: A 2-d square matrix, number of rows = number of columns with diagonal elements 1 and other elements being 0
###Code
np.eye(4)
###Output
_____no_output_____
###Markdown
Creating Random Number Arrays **np.random.rand()** :- Random numbers form uniform distribution from 0 to 1
###Code
np.random.rand(5)
np.random.rand(5,5) # Not tuples as an argument
###Output
_____no_output_____
###Markdown
**np.random.randn()** :- Random numbers not from the uniform distribution from 0 to 1 but from the standard normal distribution centered around 0
###Code
np.random.randn(5)
np.random.randn(5,5)
###Output
_____no_output_____
###Markdown
**np.random.randint()** :- randint(low,high,size)generates random integer between low and high(low is inclusive and high is exclusive) third argument size gives no. of integers we want between them
###Code
np.random.randint(1,10) #default size is 1
np.random.randint(1,100,5)
###Output
_____no_output_____
###Markdown
**Concept of Reshaping**
###Code
arr = np.arange(25)
arr
rearr = np.random.randint(0,50,10)
rearr
arr.reshape(5,5)
rearr
rearr.max()
rearr.min()
###Output
_____no_output_____
###Markdown
We can also find the index value at which minimum and maximum value is present using argmax() and argmin()
###Code
rearr.argmax()
rearr.argmin()
arr.shape
arr = arr.reshape(5,5)
arr.shape
arr
arr.dtype
###Output
_____no_output_____
###Markdown
Instead of using np.random.randint() we can also import and then use it as seen below
###Code
from numpy.random import randint
randint(2,10)
###Output
_____no_output_____
###Markdown
Numpy array indexingIndex starts from 0
###Code
import numpy as np
arr = np.arange(0,11)
arr
arr[8]
arr[1:5] #start at index 1 to index 4
arr[0:5] #start at index 0 to index 4
arr[:6] #Similar to arr[0:6] print everything till index 6 from start
arr[0:6]
arr[5:] #print everything beyond index 5
arr[0:5] = 100 #broadcast 100 to index 4
arr
arr = np.arange(0,11)
arr
slice_of_arr = arr[0:6] #get that chunk of array
slice_of_arr
slice_of_arr[:] = 99 #grabbing everything we have and brodacasting 99 to it
slice_of_arr
arr # If I call back the array the value 99 gets into the original one also
###Output
_____no_output_____
###Markdown
Data is not copied; it is just a view into the original array. The reason numpy does this is to avoid memory issues with very large arrays, meaning numpy does not automatically make copies of arrays. If you want an actual copy rather than a reference to the original array, you can explicitly ask for a copy
###Code
arr_copy = arr.copy()
arr
arr_copy
arr_copy[:] = 100
arr_copy
arr
###Output
_____no_output_____
###Markdown
The original array is not affected **Indexing in 2d Array**
###Code
import numpy as np
arr_2d = np.array([[5,10,15], [20,25,30],[35,40,45]])
arr_2d
arr_2d[0][0] #Element in first row and first column
arr_2d[0]
arr_2d[2][1]
arr_2d[2,1]
###Output
_____no_output_____
###Markdown
Let's assume you don't want single element but want chunks of array for instance you want submatrices from this matrix
###Code
arr_2d[:2,1:]
arr_2d[:2]
arr_2d[1:,1:]
###Output
_____no_output_____
###Markdown
Conditional Selection from array
###Code
arr= np.arange(1,11)
arr
arr>5 #Gets true and false values based on the comparison
bool_arr = arr>5
bool_arr
arr[bool_arr] #gets the values where boolean values are true
arr[arr>5]
arr[arr<3]
arr_2d = np.arange(50).reshape(5,10)
arr_2d
arr_2d[1:2,3:5]
###Output
_____no_output_____
###Markdown
Numpy Operations1. Array with Array2. Array with scalars3. Universal Array functions
###Code
import numpy as np
arr = np.arange(0,11)
arr
arr + arr
arr-arr
arr*arr
arr +100 #add 100 to every element of Array
arr*100 #multiply 100 to every element of Array
0/0
arr/arr #since the first element of the array is 0 it becomes 0/0; in the case of numpy this throws a warning, not an error
1/arr
arr**2 #Exponents
###Output
_____no_output_____
###Markdown
**Universal Array Function**
###Code
np.sqrt(arr)
np.exp(arr)
np.max(arr)
arr.max()
np.sin(arr)
np.log(arr)
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in log
"""Entry point for launching an IPython kernel.
###Markdown
Numpy [http://www.numpy.org/](http://www.numpy.org/) Errors
###Code
x = 1.0 / 3.0
print(x)
y = 10**30
print(y)
print(x * y)
print('{0:f}'.format(x * y))
###Output
0.3333333333333333
1000000000000000000000000000000
3.333333333333333e+29
333333333333333316505293553664.000000
###Markdown
Basics
###Code
import numpy as np
x = np.array([1,2,3,4])
x
x = np.array([[1,2,3,4], [5,6,7,8]])
x
x = np.ones([2,3])
x
x = np.zeros([2,3])
x
x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[5.0, 6.0], [7.0, 8.0]])
print(x)
print(y)
print(x + y)
print(x - y)
print(x * y)
print(np.matmul(x, y))
print(x**2)
###Output
[[ 1. 4.]
[ 9. 16.]]
###Markdown
Types
###Code
x = np.array([[1, 2], [3, 4]])
x.dtype
x = np.array([[1.0, 2.0], [3.0, 4.0]])
x.dtype
x = np.array([[1, 2], [3.0, 4.0]])
x.dtype
x = np.array([[True, False],[False, True]])
print(x)
x.dtype
x.astype(int)
###Output
_____no_output_____
###Markdown
Slicing
###Code
x = np.arange(0.0, 10.0, 0.1)
x
x.shape
x = x.reshape([10, 10])
x
x[0]
x[:,0]
x[2,3]
x[4:8,1:4]
x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[5.0, 6.0], [7.0, 8.0]])
print(x)
print(y)
np.vstack([x, y])
np.hstack([x, y])
x = np.array([1.0, 2.0])
y = np.array([[5.0, 6.0], [7.0, 8.0]])
print(x)
print(y)
np.vstack([x,y])
np.hstack([x,y])
###Output
_____no_output_____
###Markdown
Tutorial [https://docs.scipy.org/doc/numpy/user/quickstart.html](https://docs.scipy.org/doc/numpy/user/quickstart.html)
###Code
import numpy as np
import matplotlib.pyplot as pl
def mandelbrot(h, w, maxit=20):
"""Returns an image of the Mandelbrot fractal of size (h,w)."""
y, x = np.ogrid[-1.4:1.4:h*1j, -2:0.8:w*1j]
c = x + y * 1j
z = c
divtime = maxit + np.zeros(z.shape, dtype=int)
for i in range(maxit):
z = z**2 + c
diverge = z * np.conj(z) > 2**2 # who is diverging
div_now = diverge & (divtime==maxit) # who is diverging now
divtime[div_now] = i # note when
z[diverge] = 2 # avoid diverging too much
return divtime
pl.imshow(mandelbrot(400,400))
###Output
_____no_output_____
###Markdown
Programming for Data Science: Vectors - Matrices - Tensors and Arrays. Dr. Murat Gezer **NumPy** is a library developed for the Python programming language. NumPy extends Python with functions for arithmetic and numerical computation for scientific computing. The library enables efficient computation with matrices, multi-dimensional arrays and vectors. Numpy arrays must always contain a single data type. Its main features and advantages are:- a powerful N-dimensional array object- sophisticated functions- closer to the hardware (efficiency)- designed for scientific computing (convenience) - tools for integrating C/C++ and Fortran code - linear algebra, Fourier transforms and random numbers. For fast numerical work it allocates memory and computes much more quickly. For example, if we want to square the first 10000000 numbers on our computer:
###Code
import numpy as np
L = range(10000000)
%timeit [i**2 for i in L]
a = np.arange(10000000)
%timeit a**2
L
###Output
3.03 s ± 8.81 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
15.4 ms ± 44.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
Creating arrays - creating a 1-D array
###Code
# load the library
import numpy as np
# create a row vector
satirVektoru = np.array([0, 1, 2, 3])
# create a column vector
sutunVektoru=np.array([[0],[1],[2],[3]])
print(satirVektoru)
print(sutunVektoru)
###Output
[0 1 2 3]
[[0]
[1]
[2]
[3]]
###Markdown
- creating a 2-D array
###Code
matris=np.array([[1,2,3],[3,4,8],[5,6,9]])
print(matris)
###Output
[[1 2 3]
[3 4 8]
[5 6 9]]
###Markdown
Finding the rank of an array and its shape
###Code
print(".ndim",matris.ndim) # Dizinin eksenlerinin (boyutları) sayısı. Python dünyasında, boyutların sayısına rank denir.
print("shape",matris.shape) #Dizinin boyutları. Bu, her boyutta dizinin boyutunu belirten bir tamsayı yığınıdır.
#N sıralı ve m sütunlu bir matris için matris(n, m) olacaktır.
#Bu nedenle, matris dizesinin uzunluğu sırası veya boyut sayısıdır, ndim.
###Output
.ndim 2
shape (3, 3)
###Markdown
- Finding the number of dimensions of an array: ndarray.ndim → returns the number of dimensions of the numpy array object.- Finding the number of rows and columns: ndarray.shape → returns a tuple giving how many rows and columns the numpy array object has. However, the matrix data structure is not recommended, for two reasons. First, arrays are the de-facto standard data structure of Numpy. Second, the vast majority of Numpy operations return arrays, not matrix objects. Selecting elements from a vector or matrix: you can access one or more elements of a matrix or vector as shown below.
###Code
# Kütüphane yükleme
import numpy as np
# Satir vektoru oluşturma (row vector)
a = np.array([1, 2, 3,4,5,6,7,8,9,10])
# Bir A matrisi oluşturalım
A=np.array([[1,2,3],[4,5,6],[7,8,9]])
# Vektörün 3. elemanını seçelim
print ("a vektörüm=",a)
print("\n")
print("vektörün 3. elemanı=",a[2])
# Bir matrisin 2. satir 3. sütununu seçmek istersel
#print ("A matrisi=\n",A)
print("\n")
print("A matisinin 2. satırı ve 3. sütunu:",A[1,2])
# Bir vektörden tüm elemanların çekilmesi
#print(a[:])
# vektörde seçilen elemana kadar olan değerlerin getirlmesi
print(a[:3])
# vektörde seçilen elemandan sonra olan değerlerin getirlmesi
print(a[4:])
# Matris içinde seçilen satıra (2. satır) ve ondan sonra gelen satırlar için
# tüm sütun elemanlarının getirilmesi
print(A[:2,:])
# Matristeki seçilen sütuna ait değerlerin getirilmesi
print(A[:,1:2])
###Output
[[2]
[5]
[8]]
###Markdown
Matrix Operations - 1. Different matrix operations can be carried out in Python: **add()** or **+** adds two matrices; **subtract()** or **-** subtracts two matrices; **divide()** or **/** divides two matrices element-wise; **multiply()** or * multiplies two matrices element-wise; **dot()** / **@** performs matrix multiplication; **sum(x, axis)** sums all elements of the matrix (the second argument is optional: axis=0 gives column sums, axis=1 gives row sums); **T** gives the transpose of the given matrix.
###Code
# Kütüphane yükleme
import numpy as np
# Bir A matrisi oluşturalım
A = np.array([[1, 2], [4, 5]])
# Bir B matrisi oluşturalım
B = np.array([[2, 8], [4, 2]])
print (A,"\n",B)
# toplama işlemi
print ("iki matrisin toplamı (A+B): ")
print (np.add(A,B))
# çıkarma işlemi
print ("iki matrisin farkı (A-B): ")
print (np.subtract(A,B))
# çarpma işlemi
print ("iki matrisin eleman çarpımı: ")
print (np.multiply(A,B))
# çarpma işlemi (note: this repeats the element-wise product; the matrix product proper is computed with np.dot below)
print ("iki matrisin çarpımı: ")
print (np.multiply(A,B))
# bölme işlemi
print ("iki matrisin bölümü: ")
print (np.divide(A,B))
# matris çarpımı işlemi
print ("iki matrisin matris çarpımı: ")
print (np.dot(A,B))
# matris elemanların toplamı
print ("matrisin elemanlarının toplam değeri: ")
print (np.sum(A))
# matris satır elemanların toplamı
print ("matrisin satır elemanlarının toplam değeri: ")
print (np.sum(A,1))
# matris sütun elemanların toplamı
print ("matrisin sütun elemanlarının toplam değeri: ")
print (np.sum(A,0))
###Output
[[1 2]
[4 5]]
[[2 8]
[4 2]]
iki matrisin toplamı (A+B):
[[ 3 10]
[ 8 7]]
iki matrisin farkı (A-B):
[[-1 -6]
[ 0 3]]
iki matrisin eleman çarpımı:
[[ 2 16]
[16 10]]
iki matrisin çarpımı:
[[ 2 16]
[16 10]]
iki matrisin bölümü:
[[0.5 0.25]
[1. 2.5 ]]
iki matrisin matris çarpımı:
[[10 12]
[28 42]]
matrisin elemanlarının toplam değeri:
12
matrisin satır elemanlarının toplam değeri:
[3 9]
matrisin sütun elemanlarının toplam değeri:
[5 7]
###Markdown
Matrix Operations - 2: Data Manipulation. **T** transpose of the given matrix; **reshape** change the shape; **flatten** collapse to one dimension.
###Code
# matris'in Transpozesi
print ("matrisin transpozesi: ")
print (A.T)
C=np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
print("\nC=\n",C)
print(C.reshape(8,2))
print(C.flatten())
###Output
matrisin transpozesi:
[[1 4]
[2 5]]
C=
[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]
[13 14 15 16]]
[[ 1 2]
[ 3 4]
[ 5 6]
[ 7 8]
[ 9 10]
[11 12]
[13 14]
[15 16]]
[ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
###Markdown
Extracting the Diagonal of a Matrix: **diagonal** main diagonal, **diagonal(offset=1)** upper diagonal, **diagonal(offset=-1)** lower diagonal.
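A minimal sketch of all three variants listed above, using the same matrix C as the next cell:

```python
import numpy as np

C = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
print(C.diagonal())           # main diagonal:  [ 1  6 11 16]
print(C.diagonal(offset=1))   # upper diagonal: [ 2  7 12]
print(C.diagonal(offset=-1))  # lower diagonal: [ 5 10 15]
```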
###Code
C=np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
print(C,"\n")
print(C.diagonal(offset=-1))
###Output
[[ 1 2 3 4]
[ 5 6 7 8]
[ 9 10 11 12]
[13 14 15 16]]
[ 5 10 15]
###Markdown
Array Creation Functions. In practice we rarely enter elements one by one. To create evenly spaced arrays: **arange(n)** gives values from 0 to n-1; **arange(start, stop, step)** gives values from start to stop with the given step; **linspace(start, stop, num)** gives num evenly spaced points between start and stop.
###Code
a = np.arange(10) #0 dan n-1
#print(a)
print("\n")
A = np.arange(12).reshape(3,4) # 2 boyutlu (matris)
#print (A)
print("\n")
T = np.arange(24).reshape(2, 3, 4) # 3 boyutlu dizi (tensor)
#print (T)
print(np.arange(10000))
print("\n")
#print(np.arange(10000).reshape(100,100))
b = np.arange(1, 9, 2)
c = np.linspace(0, 1, 6)
print(b)
print("\n")
print(c)
###Output
[1 3 5 7]
[0. 0.2 0.4 0.6 0.8 1. ]
###Markdown
Creating arrays with **ones**, **zeros**, **eye** and **diag**
###Code
#print(np.ones((2,2)))
print("\n")
#print(np.zeros((4,4)))
print("\n")
#print(np.eye(3))
print("\n")
#print(np.diag(np.array([3,4])))
# Rastlantısal sayı üretici (Mersene Twister rastgele sayı üreticisi)
np.random.seed(12) # Rastgele tohum belirleme
a = np.random.rand(4) #uniform dağılım [0-1]
print(a)
print("\n")
b = np.random.randn(4) #gaussian dağılım
print(b)
print("\n")
c = np.floor(np.random.random((4,4))*10).astype('int8') #
print(c)
print("\n")
print(c.dtype)
###Output
[0.15416284 0.7400497 0.26331502 0.53373939]
[ 0.75314283 -1.53472134 0.00512708 -0.12022767]
[[5 4 7 1]
[7 0 1 1]
[3 6 4 8]
[2 7 7 3]]
int8
###Markdown
NumPy's default data type is float. The single-character type codes are:

| Code | Meaning |
|------|---------|
| '?' | boolean |
| 'b' | (signed) byte |
| 'B' | unsigned byte |
| 'i' | (signed) integer |
| 'u' | unsigned integer |
| 'f' | floating-point |
| 'c' | complex floating-point |
| 'm' | timedelta |
| 'M' | datetime |
| 'O' | (Python) objects |
| 'S', 'a' | zero-terminated bytes (not recommended) |
| 'U' | Unicode string |
| 'V' | raw data (void) |

Named types include float64, int32, int64, uint32 and uint64.
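A small sketch (my own example, not from the original notebook) of a few of the single-character codes above:

```python
import numpy as np

print(np.array([1, 2, 3], dtype='f4').dtype)    # float32 ('f' = floating point, 4 bytes)
print(np.array([1, 2, 3], dtype='u1').dtype)    # uint8   ('u' = unsigned integer, 1 byte)
print(np.array(['a', 'bc'], dtype='U2').dtype)  # <U2     ('U' = Unicode string of length 2)
print(np.array([0, 1, 2], dtype='?').dtype)     # bool    ('?' = boolean)
```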
###Code
import numpy as np
# Veri türü ile dizi oluşturma
dizi1 = np.array([-1,0,1,2,3,4,5,6,65537],dtype='?')
print("dizi:", dizi1)
print("veritürü:", dizi1.dtype)
# 'S' is used to define the string data type.
# Together with a size, i, u, f, S and U are used to define various other data types.
import numpy as np
# Create and initialise an array with an explicit data type and item size 8, i.e. 64 bits
arr = np.array([1, 2, 3, 4], dtype='i8')
print('Dizi:', arr)
print('Veri Türü:', arr.dtype)
def is_Power_of_two(n):
return n > 0 and (n & (n - 1)) == 0
# A power of two has exactly one bit set, so n & (n - 1) clears that bit and gives 0.
# Example: n = 4 -> 100, n - 1 = 3 -> 011, 100 & 011 = 000  (power of two)
# Counter-example: n = 6 -> 110, n - 1 = 5 -> 101, 110 & 101 = 100 != 0  (not a power of two)
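# Sketch usage added for illustration:
print(is_Power_of_two(4), is_Power_of_two(6))   # expected: True False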
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
# Vector 1-D array
a = [1,2,3]
a = a + [1]
print(a)
# Numpy array 1-D
b = np.array([4,5,6])
b = np.append(b,[7])
A = np.array([[1,22,3],[4,5,6],[111,-11,33]])
B = np.array([[10,11,12],[13,14,15],[14,7,2.5]])
A.shape
sum = np.sum(np.dot(A,B))
print(sum)
sum.dtype
C = np.array([[10,11,12],[13,14,15],[16,17,18]])
C
C.shape
K = np.array([[1,2,3],[1,2,3],[2,3,5]])
K.ndim
F = np.random.random(size=5)
F
L = np.random.rand(4,4,4)
L
L.shape
U = np.random.uniform(4.3,5.3,3)
print(U)
U.shape
Z_3D = np.array([
[
[1,2,3],
[4,5,6],
[7,8,9]
],
[
[1,2,3],
[4,5,6],
[7,8,9]],
[[1,2,3],
[4,5,6],
[7,8,9]
],
[
[1,2,3],
[4,5,6],
[7,8,9]
]
]
)
print(Z_3D)
print("Number of Dimensions",Z_3D.ndim)
print("Size of Array",Z_3D.size)
B = np.array([[[
[1,2,3],[1,2,3]],
[[1,2,3],[1,2,3]],
[[1,2,3],[1,2,3]]
]])
print(B)
print(B.ndim)
B.shape
Z_3D = np.zeros_like([
[
[1,2,3],
[4,5,6],
[7,8,9]
],
[
[1,2,3],
[4,5,6],
[7,8,9]],
[[1,2,3],
[4,5,6],
[7,8,9]
],
[
[1,2,3],
[4,5,6],
[7,8,9]
]
]
)
print(Z_3D)
print("Number of Dimensions ",Z_3D.ndim)
print("Size of Array",Z_3D.size)
###Output
[[[0 0 0]
[0 0 0]
[0 0 0]]
[[0 0 0]
[0 0 0]
[0 0 0]]
[[0 0 0]
[0 0 0]
[0 0 0]]
[[0 0 0]
[0 0 0]
[0 0 0]]]
Number of Dimensions 3
Size of Array 36
###Markdown
**The NumPy package**
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Creating an array
###Code
a=[1,2,3,4,5,6,7,8,9]
a1=np.array(a)
print(a1)
###Output
[1 2 3 4 5 6 7 8 9]
###Markdown
The four basic arithmetic operations on arrays
###Code
w0=a1+10
w1=a1*2
w2=a1-1
w3=a1/10
print(w0)
print(w1)
print(w2)
print(w3)
###Output
[11 12 13 14 15 16 17 18 19]
[ 2 4 6 8 10 12 14 16 18]
[0 1 2 3 4 5 6 7 8]
[0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]
###Markdown
The shape of arrays
###Code
print(w3.dtype)
print(w3.shape)
###Output
float64
(9,)
###Markdown
Two-dimensional and higher-dimensional arrays
###Code
q=np.array([
[1,2,3,3,4,5]
,[1,2,3,4,5,6]
,[1,2,3,4,5,0.6]
])
q1=q[0:3,:]
print(q1)
print(q.shape)
print(q.dtype)
q1.itemsize
###Output
[[1. 2. 3. 3. 4. 5. ]
[1. 2. 3. 4. 5. 6. ]
[1. 2. 3. 4. 5. 0.6]]
(3, 6)
float64
###Markdown
Creating matrices of zeros and ones
###Code
b=np.ones((3,3),np.float64)
p=np.zeros((4,4),np.int64)
print(b)
print(p)
###Output
[[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
[[0 0 0 0]
[0 0 0 0]
[0 0 0 0]
[0 0 0 0]]
###Markdown
Reshaping arrays
###Code
q=np.array([
[1,2,3,3,4,5]
,[1,2,3,4,5,6]
,[1,2,3,4,5,0.6]
])
q0=q.reshape(6,3)
print(q)
print(q0)
e=q.shape
e[0]
e2=list(q.flat)
print(e2)
###Output
[[1. 2. 3. 3. 4. 5. ]
[1. 2. 3. 4. 5. 6. ]
[1. 2. 3. 4. 5. 0.6]]
[[1. 2. 3. ]
[3. 4. 5. ]
[1. 2. 3. ]
[4. 5. 6. ]
[1. 2. 3. ]
[4. 5. 0.6]]
[1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 1.0, 2.0, 3.0, 4.0, 5.0, 0.6]
###Markdown
Joining two matrices together
###Code
a0=np.array([ [1,2,3,4,5,6,7,8],
[9,10,11,12,13,14,15,16],
[17,18,19,20,21,22,23,24]
])
b0=np.array([ [1,2,3,4,5,6,7,8],
[9,10,11,12,13,14,15,16],
[17,18,19,20,21,22,23,24]
])
c0=np.vstack((a0,b0))
c1=np.hstack((a0,b0))
c2=list(c1.flat)
c3=np.concatenate((a0,b0),axis=1)
c4=np.concatenate((a0,b0),axis=0)
print(a0)
print("---------------------------------")
print(c0)
print("---------------------------------")
print(c1)
print("---------------------------------")
print(c2)
print("----------------------------------")
print(c3)
print("-----------------------------------")
print(c4)
###Output
[[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]]
---------------------------------
[[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]
[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]]
---------------------------------
[[ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24 17 18 19 20 21 22 23 24]]
---------------------------------
[1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 17, 18, 19, 20, 21, 22, 23, 24]
----------------------------------
[[ 1 2 3 4 5 6 7 8 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24 17 18 19 20 21 22 23 24]]
-----------------------------------
[[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]
[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]]
###Markdown
Splitting an array
###Code
a0=np.array([ [1,2,3,4,5,6,7,8],
[9,10,11,12,13,14,15,16],
[17,18,19,20,21,22,23,24]])
e0=np.hsplit(a0,2)
e1=np.vsplit(a0,3)
print(a0)
print("---------------------------------")
print(e0)
print("---------------------------------")
print(e1)
e1[1]
###Output
[[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]]
---------------------------------
[array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12],
[17, 18, 19, 20]]), array([[ 5, 6, 7, 8],
[13, 14, 15, 16],
[21, 22, 23, 24]])]
---------------------------------
[array([[1, 2, 3, 4, 5, 6, 7, 8]]), array([[ 9, 10, 11, 12, 13, 14, 15, 16]]), array([[17, 18, 19, 20, 21, 22, 23, 24]])]
###Markdown
Iterating over arrays
###Code
a0=np.array([ [1,2,3,4,5,6,7,8],
[9,10,11,12,13,14,15,16],
[17,18,19,20,21,22,23,24]])
a=a0.shape
a1=np.ones(a,np.int8)
print(a1)
print(type(a1))
for i0 in a0:
for i1 in i0:
s=i1
s=s*2
print(s)
for x in np.nditer(a0):
print(x)
a0=np.array([ [1,2,3,4,5,6,7,8],
[9,10,11,12,13,14,15,16],
[17,18,19,20,21,22,23,24]])
for x in np.nditer(a0, flags=['buffered'],op_dtypes=['S']):
print(x)
print(a0)
print("----------------------------------")
print(a0[:,::2])
print("-----------------------------------")
print(a0[1,3])
print("---------------------------------------------------------------------------------------")
q=np.shape(a0)
q=list(q)
print("---------------------------------------------------------------------------------------")
for idx , x in np.ndenumerate(a0):
print(a0[idx])
a0[idx]=a0[idx]+1
print(a0)
###Output
b'1'
b'2'
b'3'
b'4'
b'5'
b'6'
b'7'
b'8'
b'9'
b'10'
b'11'
b'12'
b'13'
b'14'
b'15'
b'16'
b'17'
b'18'
b'19'
b'20'
b'21'
b'22'
b'23'
b'24'
[[ 1 2 3 4 5 6 7 8]
[ 9 10 11 12 13 14 15 16]
[17 18 19 20 21 22 23 24]]
----------------------------------
[[ 1 3 5 7]
[ 9 11 13 15]
[17 19 21 23]]
-----------------------------------
12
---------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
[[ 2 3 4 5 6 7 8 9]
[10 11 12 13 14 15 16 17]
[18 19 20 21 22 23 24 25]]
###Markdown
Linear algebra
###Code
a0=np.array([ [1,2,3],
[9,10,11],
[17,18,19]])
c0=np.trace(a0)
c1=np.linalg.matrix_rank(a0)
c2=np.linalg.det(a0)
c3=np.linalg.eigh(a0)
print(c0)
print("-------------------------------------------------------")
print(c1)
print("-------------------------------------------------------")
print(c2)
print("-------------------------------------------------------")
print(c3)
print("--------------------------------------------------------------------------")
a=np.array([ [1,2],
[3,4]
])
b=np.array(
[8,18]
)
x=np.linalg.solve(a,b)
print(a)
print(b)
print(x)
print("--------------------------------------------------------------------------")
# x co-ordinates
x = np.arange(0, 9)
A = np.array([x, np.ones(9)])
# linearly generated sequence
y = [19, 20, 20.5, 21.5, 22, 23, 23, 25.5, 24]
# obtaining the parameters of regression line
w = np.linalg.lstsq(A.T, y)[0]
# plotting the line
line = w[0]*x + w[1] # regression line
plt.plot(x, line, 'r-')
plt.plot(x, y, 'o')
plt.show()
import matplotlib.pyplot as plot
mean=0
var=1
a=np.random.normal(0,1,(1,20))
print(a)
count, bins, ignored = plot.hist(a, 30, density=True)  # 'normed' was removed from matplotlib; 'density' is the replacement
plot.plot(bins, 1/(var * np.sqrt(2 * np.pi)) *
np.exp( - (bins - mean)**2 / (2 * var**2) ),
linewidth=2, color='r')
plot.show()
a = np.arange(1, 10, 1)  # was "numpy.arrange", a typo that would raise an error; use np.arange
print(a)
###Output
_____no_output_____
###Markdown
Make some arrays
###Code
a = np.arange(0,10)
print(a)
a = a.reshape((2,5))
print(a)
b = np.arange(10,20).reshape((2,5))
print(b)
###Output
[[10 11 12 13 14]
[15 16 17 18 19]]
###Markdown
Put some arrays together
###Code
c = np.hstack([a,b])
print(c)
d = np.vstack([a,b])
print(d)
b = np.array([[2, 100, 238],
              [2, 100, 237],
              [4, 100, 237],
              [4, 100, 236]])
np.mean(a, axis=0)  # column means of a
###Output
_____no_output_____
###Markdown
Table of Contents1 ndarray creation1.1 Sampling2 reshape3 transformation3.1 element-wise3.2 shape-changing3.2.1 Matrix multiplication3.2.2 Vector length4 Deep Learning4.1 log loss / cross-entropy4.2 Weight Initialization
###Code
import numpy as np
np.set_printoptions(suppress=True)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all" # print all cell outputs and not just the last one
###Output
_____no_output_____
###Markdown
ndarray creation
###Code
np.array([1,2,3])
np.array([[1, 2], [3, 4]]) # shape (2,2)
# create evenly spaced on log space
alphas = np.logspace(0, -4, num=5, base=10.)
np.arange(1, 4) # [1 2 3]
x = np.arange(1, 10, dtype=np.float64).reshape(3,3)
np.random.permutation(x)
np.zeros((3, 1)) # dtype('float64')
np.zeros_like(x) # same shape as x
ones = np.ones((32, 1), dtype=np.int16)
###Output
_____no_output_____
###Markdown
Sampling
###Code
# create array of shape 32x32x3 with random samples from uniform distribution over [0, 1)
img = np.random.rand(32, 32, 3)
# sample array of shape 5x1 from standard normal distribution (mean 0 & variance 1)
np.random.randn(5, 1)
y = np.random.randint(1, 5, (3,1))
# sample without replacement
# can be used to create indices to randomly divide dataset
np.random.permutation(10)
###Output
_____no_output_____
###Markdown
reshape
###Code
# create 1-column vector from array
img_flat = img.reshape(np.prod(img.shape), 1)
img_flat.shape
# alternative way
# np docs : "One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions."
img_flat2 = img.reshape(-1, 1)
np.unique(img_flat == img_flat2, return_counts=True)
###Output
_____no_output_____
###Markdown
transformation element-wise
###Code
# most functionality is available as ndarray method and np function
np.round(x, 2) # x.round(2)
# element-wise multiplication
x * 2 # np.multiply(x, 2)
# all 3 approaches calculate square element-wise
# np.power(x, 2)
# np.square(x)
x**2
# misc methods
np.exp(x)
# np.tanh(x)
# np.log(x)
# probabilistic classification
y_prob = np.random.rand(10, 1)
y_pred = (y_prob > 0.5)
np.mean(y_pred) # numpy can convert boolean to 1/0
###Output
_____no_output_____
###Markdown
shape-changing
###Code
np.sum(x)
x.T # transpose
###Output
_____no_output_____
###Markdown
Dot product
###Code
np.dot(x, y)
###Output
_____no_output_____
###Markdown
Matrix multiplication
###Code
np.matmul(x, y)
# Python 3.5 adds magic function __matmul__ which can be called with '@'
x @ y
###Output
_____no_output_____
###Markdown
Vector length
###Code
x = np.arange(1,4) # [1 2 3]
np.linalg.norm(x) # length of the vector: sqrt(1**2 + 2**2 + 3**2) == sqrt(14) == 3.74...
###Output
_____no_output_____
###Markdown
Deep Learning log loss / cross-entropy
###Code
Y = np.array([1, 0, 1, 1, 0])
Y_pred = np.array([0.8, 0.1, 0.9, 0.6, 0.05])
m = Y.shape[0]
# option 1: using matrix multiplication
cost = -1/m * (np.dot(Y.T, np.log(Y_pred)) + np.dot(1-Y.T, np.log(1-Y_pred)))
# option 2: using np.sum + element-wise multiplication
cost2 = -1/m * (np.sum(Y * np.log(Y_pred)) + np.sum((1-Y) * np.log(1-Y_pred)))
cost2
# option 3: scikit-learn :-)
from sklearn.metrics import log_loss
log_loss(Y, Y_pred)
###Output
_____no_output_____
###Markdown
Weight Initialization
###Code
layers_dims = [1024, 512, 512, 256, 1]
l = 1
# Xavier
np.random.randn(layers_dims[l], layers_dims[l-1]) * np.sqrt(1./layers_dims[l-1])
# He et al. 2015
np.random.randn(layers_dims[l], layers_dims[l-1]) * np.sqrt(2./layers_dims[l-1])
###Output
_____no_output_____
###Markdown
What is an identity matrix? A matrix with 1 on every diagonal entry and 0 everywhere else. What is the difference between (3,) and (3,1)? (3,) is a 1-D array with three elements; (3,1) is a 2-D array with three rows and one column. What is the difference between resize and reshape? reshape only changes the shape of the array, while resize can also change the number of elements.
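A short illustrative sketch of the three points above (the array names are arbitrary):

```python
import numpy as np

print(np.eye(3))               # identity matrix: ones on the diagonal, zeros elsewhere

a = np.array([1, 2, 3])        # shape (3,):  1-D array with 3 elements
b = np.array([[1], [2], [3]])  # shape (3, 1): 2-D array with 3 rows and 1 column
print(a.shape, b.shape)

c = np.arange(6)
print(c.reshape(2, 3))            # reshape only rearranges the existing 6 elements
c.resize((2, 4), refcheck=False)  # resize can change the number of elements (new slots are zero-filled)
print(c)
```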
###Code
import numpy as np
#(1)1~25 사이의 2의 배수 12개로 1차원 배열(a) 생성, 출력
print("(1)")
a=np.arange(2, 25, 2)
print("a=",a)
#(2)a 의 인덱스 2~5의 요소값을 -20으로 수정, 출력
print("\n(2)")
a[2:6]=[-20]
print("a[2~5]=",a[2:6])
#(3)a의 끝에서 두번째부터 끝가지 요소값을 2배하여 저장, 출력
print("\n(3)")
a[-2:]=a[-2:]*2
print("a[-2:]=", a[-2:])
#(4)1~9사이 임의값 12개로 배열 b 생성, 출력
print("\n(4)")
b=np.random.randint(1, 10, 12)
print("b=",b)
#(5)b 배열을 10배하여 c에 저장
print("\n(5)")
c=b*10
print("c=",c)
#(6)a배열과 b배열을 더하여 d 배열 생성
print("\n(6)")
d=a+b
print("d=",d)
#(7)b 배열의 최대,최소,평균 출력
print("\n(7)")
print("max={}, min={}, mean={}".format(max(b),min(b),b.mean()))
#(8)a에서 b 배열의 평균보다 큰값을 추출
print("\n(8)")
f=a>b.mean()
print("a에서 b배열의 평균보다 큰값=",a[f])
#(1)1~25 사이의 2의 배수 12개로 1차원 배열(a) 생성, 출력
print("(1)")
a=np.arange(2, 25, 2)
print("a=",a)
#(2)a를 이용하여 3X4 배열로 변경하여 b생성, 출력
print("\n(2)")
b=a.reshape(3, 4)
print("b=",b)
#(3)a를 2X6 배열로 변경, 출력
print("\n(3)")
a=a.reshape(2, 6)
print("a=",a)
#(4)a를 1행의 모든 값을 추출하여 a1을 만들고, a1의 모든 값을 0으로 변경, a1,a출력
print("\n(4)")
a1=a[1,:].copy()
print("a1=",a1)
a1[0:]=0
print("a1={}\na={}".format(a1,a))
#(5)1~10 사이의 임의의 값으로 3X3 배열 x, y생성, 출력
print("\n(5)")
x=np.random.randint(1, 11, size=(3, 3))
y=np.random.randint(1, 11, size=(3, 3))
print("x={}\ny={}".format(x, y))
#(6)x의 1, 2 행의 모든 열추출, 출력
print("\n(6)")
print("x[1,:]={}\nx[2,:]={}".format(x[1,:],x[2,:]))
#(7)x의 2열의 모든 행 추출, 출력
print("\n(7)")
print("x[:,2]=",x[:,2])
#(8)x의 0,2열의 1,2 행 추출, 출력
print("\n(8)")
print("x[1:3,0]={}\nx[1:3,2]={}".format(x[1:3,0],x[1:3,2]))
#(9)x의 각행의 합, 각 열의 합, 출력
print("\n(9)")
print("x[0,:]sum={}\nx[1,:]sum={}\nx[2,:]sum={}".format(x[0,:].sum(), x[1,:].sum(), x[2,:].sum()))
print("x 각 행의 합=",x.sum(axis=1))
print("x[:,0]sum={}\nx[:,1]sum={}\nx[:,2]sum={}".format(x[:,0].sum(), x[:,1].sum(), x[:,2].sum()))
print("x 각 열의 합=",x.sum(axis=0))
#(10)x의 각행의 최대, 각 열의 최대, 출력
print("\n(10)")
print("x[0,:]max={}\nx[1,:]max={}\nx[2,:]max={}".format(x[0,:].max(), x[1,:].max(), x[2,:].max()))
print("x[:,0]max={}\nx[:,1]max={}\nx[:,2]max={}".format(x[:,0].max(), x[:,1].max(), x[:,2].max()))
#(11)x와 y의 합 결과 출력
print("\n(11)")
print("x+y=",x+y)
#(12)X를 1차원으로 변경하여 x1 생성
x1=x.ravel()
print("x1=",x1)
#(1) 1~3, 10~30, 100~300 3개의 1차원 배열(a1,a2,a3)을 생성하여
# 행으로 결합된 y배열 생성
print("\n(1)")
a1=np.arange(1, 4)
a2=np.arange(10, 40, 10)
a3=np.arange(100, 400, 100)
y=np.vstack([a1, a2, a3])
print("y=",y)
#(2) y배열의 0,2 열을 복사하여 y1, 1열을 복사하여 y2 생성
print("\n(2)")
y1=np.hstack([y[:,0].copy(), y[:,2].copy()])
y2=y[:,1].copy()
print("y1={}\ny2={}".format(y1, y2))
#(3) y1, y2를 결합하여 yy배열 생성
print("\n(3)")
yy=np.hstack([y1, y2])
print("yy=",yy)
###Output
(1)
y= [[ 1 2 3]
[ 10 20 30]
[100 200 300]]
(2)
y1=[ 1 10 100 3 30 300]
y2=[ 2 20 200]
(3)
yy= [ 1 10 100 3 30 300 2 20 200]
###Markdown
NumPyNumPy is a Python library that provides `ndarray` (a multi-dimensional array structure) together with a wide range of mathematical functions that operate on it. See the NumPy Documentation.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
The `ndarray` structureThe `ndarray` data structure is the core of NumPy. It is an `n`-dimensional array whose elements all share a single `dtype`.
###Code
my_vector = np.array([1, 2, 3, 4])
my_vector
my_vector.shape
my_vector.dtype
my_matrix = np.array([[1, 2], [3, 4]])
my_matrix
my_matrix.shape
# Find the length of each element in bytes
my_matrix.itemsize
my_matrix2 = np.array([[1, 2], [3, 4]], dtype=np.int8)
my_matrix2.itemsize
###Output
_____no_output_____
###Markdown
Array creation methods
###Code
# Create an uninitialised array of specified shape and dtype
np.empty(shape=(4,4),dtype=np.int8)
np.zeros(4)
np.zeros((4,4))
np.zeros((4,4)) + 42
# Create a new zero matrix of the same shape as another matrix.
np.zeros_like(my_matrix)
np.ones(4)
np.ones((4,4))
# Similar to Python's built-in range() function
np.arange(start=0, stop=10, step=2)
# Like arange() but instead of a step size, we specify the
# number of values that we need. It generates lineary-spaced
# numbers in the given interval
np.linspace(start=10, stop=20, num=5)
# Generate numbers that are evenly spaced on a logarithmic scale
np.logspace(start=1, stop=2, num=10)
###Output
_____no_output_____
###Markdown
Aggregate methods (min and max)
###Code
arr1 = np.array([10, 87, 86, 5, 4, 38, 94, 76, 12, 17])
arr1
arr1.max(), arr1.argmax()
arr1.min(), arr1.argmin()
arr1_copy = arr1.copy()
arr1_copy
###Output
_____no_output_____
###Markdown
Summations
###Code
matrix1 = np.arange(1,26).reshape(5,5)
matrix1
# Sum values in the matrix
matrix1.sum()
# Sum values by column
matrix1.sum(0)
###Output
_____no_output_____
###Markdown
Transform a 1D array into a 2D array
###Code
prime_numbers = np.array([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97])
prime_numbers
prime_numbers.reshape(5, 5)
###Output
_____no_output_____
###Markdown
Identity Matrix
###Code
np.eye(4)
###Output
_____no_output_____
###Markdown
Random Numbers
###Code
# Use NumPy to generate four random numbers between 0 and 1
np.random.rand(4)
np.random.rand(4, 4)
np.random.randint(1, 101)
# Generates 10 random integers between 1 and 100
np.random.randint(1, 101, 10)
###Output
_____no_output_____
###Markdown
Sample from the normal distribution
###Code
# Generate four numbers from the normal distribution centred around zero
np.random.randn(4)
np.random.randn(4, 4)
###Output
_____no_output_____
###Markdown
Indexing and slicing
###Code
arr2 = np.arange(0, 101, 10)
arr2
arr2[2]
# Use Python's slice notation to fetch elements from the array
arr2[3:6]
arr2[3:]
arr2[:4]
# Boolean indexing
arr2[arr2 > 5]
arr2 > 5
arr_with_nans = np.array([np.nan, 1,2,np.nan,3,4,5])
arr_with_nans
# Get an array where NaN elements are omitted
arr_with_nans[~np.isnan(arr_with_nans)]
matrix2 = np.arange(1, 26).reshape(5,5)
matrix2
matrix2[1]
matrix2[1,2] # same as matrix2[1][2]
matrix2[1:4,1:4]
# Use ellipsis to get elements from the third column
matrix2[...,2]
# Fetch elements placed at corners of the 5x5 array
rows = np.array([[0,0],[4,4]])
cols = np.array([[0,4],[0,4]])
matrix2[rows, cols]
###Output
_____no_output_____
###Markdown
TilingTiling is a common and useful operation when we need to extend an array so that its shape matches another NumPy array. By doing so, we can use the 'tiled' array in applying some element-wise operation between them.
###Code
arr3 = np.array([9, 4, 4])
arr3
np.tile(arr3, (4, 1))
np.tile(arr3, (5, 2))
###Output
_____no_output_____
###Markdown
Broadcasting*Broadcasting* is Numpy's terminology for performing mathematical operations between arrays of different shapes. If certain assumptions hold, the smaller of the two arrays is said to be broadcast to the size of the larger array in order to make the two arrays compatible so element-to-element operations can be performed. Read more.
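As a quick sketch of the rule (shapes are aligned from the trailing axis, and an axis of length 1 is stretched), using two small made-up arrays:

```python
import numpy as np

col = np.array([[0], [10], [20]])  # shape (3, 1)
row = np.array([1, 2, 3, 4])       # shape (4,), treated as (1, 4)

print((col + row).shape)           # (3, 4): both inputs are broadcast to the common shape
print(col + row)
```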
###Code
macro_nutrients = np.array([[0.3, 2.5, 3.5],
[2.9, 27.5, 0],
[0.4, 1.3, 23.9],
[14.4, 6, 2.3]])
calories_per_macro = np.array([9, 4, 4])
macro_nutrients * calories_per_macro
arr4 = np.arange(0, 10)
arr4
arr4[0:4] = 10
arr4
###Output
_____no_output_____
###Markdown
Arithmetic
###Code
arr5 = np.arange(0, 10)
arr6 = np.arange(10, 20)
arr5
arr6
arr5 + arr6
arr6 - arr5
arr5 * arr6
arr5 + 10 # broadcasting
arr6 - 10
arr5 ** 2
# NumPy generates a warning if we attempt to divide by zero.
arr5 / arr5
1 / arr5
###Output
C:\Users\omar\Anaconda3\lib\site-packages\ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in true_divide
"""Entry point for launching an IPython kernel.
###Markdown
Universal functionsA universal function (ufunc) is a mathematical function that operates on arrays in an element-by-element fashion supporting array broadcasting, type casting, and several other standard features. For more information read the ufunc documentation.
###Code
arr7 = np.array([2, 6, 7, 10, 45, 200])
arr7
# Computes the square root of each element of the array
np.sqrt(arr7)
np.exp(arr7)
np.log(arr7)
###Output
_____no_output_____
###Markdown
Numpy [http://www.numpy.org/](http://www.numpy.org/) Errors
###Code
x = 1.0 / 3.0
print(x)
y = 10**30
print(y)
print(x * y)
print('{0:f}'.format(x * y))
###Output
0.3333333333333333
1000000000000000000000000000000
3.333333333333333e+29
333333333333333316505293553664.000000
###Markdown
Basics
###Code
import numpy as np
x = np.array([1,2,3,4])
x
x = np.array([[1,2,3,4], [5,6,7,8]])
x
x = np.ones([2,3])
x
x = np.zeros([2,3])
x
x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[5.0, 6.0], [7.0, 8.0]])
print(x)
print(y)
print(x + y)
print(x - y)
print(x * y)
print(np.matmul(x, y))
print(x**2)
###Output
[[ 1. 4.]
[ 9. 16.]]
###Markdown
Types
###Code
x = np.array([[1, 2], [3, 4]])
x.dtype
x = np.array([[1.0, 2.0], [3.0, 4.0]])
x.dtype
x = np.array([[1, 2], [3.0, 4.0]])
x.dtype
x = np.array([[True, False],[False, True]])
print(x)
x.dtype
x.astype(int)
###Output
_____no_output_____
###Markdown
Slicing
###Code
x = np.arange(0.0, 10.0, 0.1)
x
x.shape
x = x.reshape([10, 10])
x
x[0]
x[:,0]
x[2,3]
x[4:8,1:4]
x = np.array([[1.0, 2.0], [3.0, 4.0]])
y = np.array([[5.0, 6.0], [7.0, 8.0]])
print(x)
print(y)
np.vstack([x, y])
np.hstack([x, y])
x = np.array([1.0, 2.0])
y = np.array([[5.0, 6.0], [7.0, 8.0]])
print(x)
print(y)
np.vstack([x,y])   # works: the 1-D x is stacked as a row on top of y
np.hstack([x,y])   # raises ValueError: a 1-D and a 2-D array cannot be stacked side by side
###Output
_____no_output_____
###Markdown
Tutorial [https://docs.scipy.org/doc/numpy/user/quickstart.html](https://docs.scipy.org/doc/numpy/user/quickstart.html)
###Code
import numpy as np
import matplotlib.pyplot as pl
def mandelbrot(h, w, maxit=20):
"""Returns an image of the Mandelbrot fractal of size (h,w)."""
y, x = np.ogrid[-1.4:1.4:h*1j, -2:0.8:w*1j]
c = x + y * 1j
z = c
divtime = maxit + np.zeros(z.shape, dtype=int)
for i in range(maxit):
z = z**2 + c
diverge = z * np.conj(z) > 2**2 # who is diverging
div_now = diverge & (divtime==maxit) # who is diverging now
divtime[div_now] = i # note when
z[diverge] = 2 # avoid diverging too much
return divtime
pl.imshow(mandelbrot(400,400))
###Output
_____no_output_____
###Markdown
--- NumPy Data X **Author:** Alexander Fred-Ojala & Ikhlaq Sidhu **License Agreement:** Feel free to do whatever you want with this code ___ Introduction to NumPy What is NumPy: NumPy stands for Numerical Python and it is the fundamental package for scientific computing with Python. It is a package that lets you efficiently store and manipulate numerical arrays. It contains among other things: * a powerful N-dimensional array object * sophisticated (broadcasting) functions * tools for integrating C/C++ and Fortran code * useful linear algebra, Fourier transform, and random number capabilities NumPy contains an array object that is "fast". It stores: * the location of a memory block (allocated all at one time) * a shape (3 x 3 or 1 x 9, etc.) * the data type / size of each element The core feature that NumPy supports is its multi-dimensional arrays. In NumPy, dimensions are called axes and the number of axes is called the rank.
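A small sketch (added for illustration) printing the pieces of information listed above for a toy array:

```python
import numpy as np

arr = np.arange(9, dtype=np.int32).reshape(3, 3)
print(arr.shape)     # (3, 3): the shape
print(arr.dtype)     # int32: data type of each element
print(arr.itemsize)  # 4: size of one element in bytes
print(arr.nbytes)    # 36: total size of the single memory block
print(arr.ndim)      # 2: number of axes (the rank)
```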
###Code
# written for Python 3.6
import numpy as np
np.__version__ # made for v. 1.13.3
###Output
_____no_output_____
###Markdown
Creating a NumPy Array: - 1. Simplest possible: We use a list as an argument input in making a NumPy Array
###Code
# Create array from Python list
list1 = [1, 2, 3, 4]
data = np.array(list1)
data
# Find out object type
type(data)
# See data type that is stored in the array
data.dtype
# The data types are specified for the full array, if we store
# a float in an int array, the float will be up-casted to an int
data[0] = 3.14159
print(data)
# NumPy converts to most logical data type
list2 = [1.2, 2, 3, 4]
data2 = np.array(list2)
print(data2)
print(data2.dtype) # all values will be converted to floats if we have one
# We can manually specify the datatype
list3 = [1, 2, 3]
data3 = np.array(list3, dtype=str) #manually specify data type
print(data3)
print(data3.dtype)
# lists can also be much longer
list4 = range(100001)
data = np.array(list4)
data
len(data) # to see the length of the full array
# data = np.array(1,2,3,4, 5,6,7,8,9) # wrong
data = np.array([1,2,3,4,5,6,7,8,9]) # right
data
# see documentation, the first keyword is the object to be passed in
np.array?
###Output
_____no_output_____
###Markdown
More info on data types can be found here:https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html Accessing elements: Slicing and indexing
###Code
# Similar to indexing and slicing Python lists:
print(data[:])
print (data[0:3])
print (data[3:])
print (data[::-2])
# more slicing
x = np.array(range(25))
print ('x:',x)
print()
print (x[5:15:2])
print (x[15:5:-1])
###Output
x: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24]
[ 5 7 9 11 13]
[15 14 13 12 11 10 9 8 7 6]
###Markdown
Arrays are like lists, but differentNumPy utilizes efficient pointers to a location in memory and it will store the full array in memory. Lists on the other hand are pointers to many different objects in memory.
###Code
# Slicing returns a view in Numpy,
# and not a copy as is the case with Python lists
data = np.array(range(10))
view = data[0:3]
view
l = list(range(10))
copy = l[0:3]
copy
copy[0] = 99
view[0] = 99
print(copy)
print(view)
print('Python list:',l) # has not changed
print('NumPy array:',data) # has changed
# Creating copies of the array instead of views
arr_copy = data[:3].copy()
print('Array copy',arr_copy)
arr_copy[0] = 555
print('New array copy',arr_copy)
print('Original array',data) # now it is not a view any more
# same thing with assignment, its not a copy, its the same data
x = np.array(range(25))
print (x)
y = x
y[:] = 0
print (x)
x is y
###Output
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24]
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
###Markdown
Arrays are a lot faster than lists
###Code
# Arrays are faster and more efficient than lists
x = list(range(100000))
y = [i**2 for i in x]
print (y[0:5])
# Time the operation with some IPython magic command
print('Time for Python lists:')
list_time = %timeit -o -n 20 [i**2 for i in x]
z = np.array(x)
w = z**2
print(w[:5])
print('Time for NumPy arrays:')
np_time = %timeit -o -n 20 z**2
print('NumPy is ' + str(list_time.all_runs[0]//np_time.all_runs[0]) + ' times faster than lists at squaring 100 000 elements.')
###Output
NumPy is 208.0 times faster than lists at squaring 100 000 elements.
###Markdown
Universal functionsA function that is applied on an `ndarray` in an element-by-element fashion. Several universal functions can be found the NumPy documentation here:https://docs.scipy.org/doc/numpy-1.13.0/reference/ufuncs.html
###Code
# Arrays are different than lists in another way:
# x and y are lists
x = list(range(5))
y = list(range(5,10))
print ("list x = ", x)
print ("list y = ", y)
print ("x + y = ", x+y)
# now lets try with NumPy arrays:
xn = np.array(x)
yn = np.array(y)
print ('np.array xn =', xn)
print ('np.array xn =', yn)
print ("xn + yn = ", xn + yn)
# + for np.arrays is a wrapper around the function np.add
np.add(xn,yn)
# An array is a sequence that can be manipulated easily
# An arithmatic operation is applied to each element individually
# When two arrays are added, they must have the same size
# (otherwise they might be broadcasted)
print (3* x)
print (3 * xn)
###Output
[0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
[ 0 3 6 9 12]
###Markdown
Join, add, concatenate
###Code
print(xn)
print(yn)
# if you need to join numpy arrays, try hstack, vstack, column_stack, or concatenate
print (np.hstack((xn,yn)))
print (np.vstack((xn,yn)))
print (np.column_stack((xn,yn)))
print (np.concatenate((xn, yn), axis = 0))
# the elements of an array must be of a type that is valid to perform
# a specific mathematical operation on
data = np.array([1,2,'cat', 4])
print(data)
print(data.dtype)
print (data+1) # results in error
###Output
['1' '2' 'cat' '4']
<U21
###Markdown
Creating arrays with 2 axes:
###Code
# This list has two dimensions
list3 = [[1, 2, 3],
[4, 5, 6]]
list3 # nested list
# data = np.array([[1, 2, 3], [4, 5, 6]])
data = np.array(list3)
data
###Output
_____no_output_____
###Markdown
Attributes of a multidim array
###Code
print('Dimensions:',data.ndim)
print ('Shape:',data.shape)
print('Size:', data.size)
# You can also transpose an array Matrix with either np.transpose(arr)
# or arr.T
print ('Transpose:')
data.T
# print (list3.T) # note, this would not work
###Output
Transpose:
###Markdown
Other ways to create NumPy arrays
###Code
# np.arange() is similar to built in range()
# Creates array with a range of consecutive numbers
# starts at 0 and step=1 if not specified. Exclusive of stop.
np.arange(12)
#Array increasing from start to end: np.arange(start, end)
np.arange(10, 20)
# Array increasing from start to end by step: np.arange(start, end, step)
# The range always includes start but excludes end
np.arange(1, 10, 2)
# Returns a new array of specified size, filled with zeros.
array=np.zeros((2,5), dtype=np.int8)
array
#Returns a new array of specified size, filled with ones.
array=np.ones((2,5), dtype=np.float128)
array
# Returns the identity matrix of specific squared size
array = np.eye(5)
array
###Output
_____no_output_____
###Markdown
Some useful indexing strategies There are two main types of indexing: Integer and Boolean
###Code
x = np.array([[1, 2], [3, 4], [5, 6]])
x
###Output
_____no_output_____
###Markdown
Integer indexing
###Code
# first element is the row, 2nd element is the column
print(x[1,0])
print(x[1:,:]) # all rows after first, all columns
# first list contains row indices, 2nd element contains column indices
idx = x[[0,1,2], [0,1,1]] # create index object
print (idx)
###Output
[1 4 6]
###Markdown
Boolean indexing
###Code
print('Comparison operator, find all values greater than 3:\n')
print(x>3)
print('Boolean indexing, only extract elements greater than 3:\n')
print(x[x>3])
###Output
Boolean indexing, only extract elements greater than 3:
[4 5 6]
###Markdown
Masks
###Code
arr = np.arange(10)
arr
mask = arr>5
print(mask)
arr[mask]
# Functions any / all
print( np.any( arr==9 ) )
print( np.all( arr>-1 ) )
###Output
True
True
###Markdown
Extra NumPy array methods
###Code
# Reshape is used to change the shape
a = np.arange(0, 15)
print('Original:',a)
a = a.reshape(3, 5)
# a = np.arange(0, 15).reshape(3, 5) # same thing
print ('Reshaped:')
print(a)
# We can also easily find the sum, min, max, .. are easy
print (a)
print ('Sum:',a.sum())
print('Min:', a.min())
print('Max:', a.max())
print ('Sum along columns:',a.sum(axis=0))
print ('Sum along rows:',a.sum(axis=1))
# Note here axis specifies what dimension to "collapse"
###Output
Sum along columns: [15 18 21 24 27]
Sum along rows: [10 35 60]
###Markdown
Array Axis. To get the cumulative product:
###Code
print (np.arange(1, 10))
print (np.cumprod(np.arange(1, 10)))
###Output
[1 2 3 4 5 6 7 8 9]
[ 1 2 6 24 120 720 5040 40320 362880]
###Markdown
To get the cumulative sum:
###Code
print (np.arange(1, 10))
np.cumsum((np.arange(1, 10)))
###Output
[1 2 3 4 5 6 7 8 9]
###Markdown
Creating a 3D array:
###Code
a = np.arange(0, 96).reshape(2, 6, 8)
print(a)
# The same methods typically apply in multiple dimensions
print (a.sum(axis = 0))
print ('---')
print (a.sum(axis = 1))
###Output
[[ 48 50 52 54 56 58 60 62]
[ 64 66 68 70 72 74 76 78]
[ 80 82 84 86 88 90 92 94]
[ 96 98 100 102 104 106 108 110]
[112 114 116 118 120 122 124 126]
[128 130 132 134 136 138 140 142]]
---
[[120 126 132 138 144 150 156 162]
[408 414 420 426 432 438 444 450]]
###Markdown
More ufuncs and Basic Operations One of the coolest parts of NumPy is the ability for you to run mathematical operations on top of arrays. Here are some basic operations:
###Code
a = np.arange(11, 21)
b = np.arange(0, 10)
print ("a = ",a)
print ("b = ",b)
print (a + b)
a * b
a ** 2
###Output
_____no_output_____
###Markdown
You can even do things like matrix operations
###Code
a.dot(b)
# Matrix multiplication
c = np.arange(1,5).reshape(2,2)
print ("c = \n", c)
print()
d = np.arange(5,9).reshape(2,2)
print ("d = \n", d)
print (d.dot(c))
np.matmul(d,c)
###Output
_____no_output_____
###Markdown
Random numbers
###Code
# Random numbers
np.random.seed(0) # set the seed to zero for reproducibility
print(np.random.uniform(1,5,10)) # 10 random uniform numbers from 1 to 5
print()
print (np.random.exponential(1,5)) # 5 random exp numbers with rate 1
print (np.random.random(8).reshape(2,4)) #8 random 0-1 in a 2 x 4 array
###Output
[[ 0.0871293 0.0202184 0.83261985 0.77815675]
[ 0.87001215 0.97861834 0.79915856 0.46147936]]
###Markdown
If you want to learn more about "random" numbers in NumPy go to: https://docs.scipy.org/doc/numpy-1.12.0/reference/routines.random.html Trigonometric functions
###Code
# linspace: Create an array with numbers from a to b
# with n equally spaced numbers (inclusive)
data = np.linspace(0,10,5)
print (data)
from numpy import pi
x = np.linspace(0,pi, 3)
print('x = ', x)
print()
print ("sin(x) = ", np.sin(x))
# flatten matrices using ravel()
x = np.array(range(24))
x = x.reshape(4,6)
print('Original:\n',x)
print()
x = x.ravel() # make it flat
print ('Flattened:\n',x)
###Output
Original:
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]
[12 13 14 15 16 17]
[18 19 20 21 22 23]]
Flattened:
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23]
###Markdown
What is numpy? ---- Numerical Python: it makes complicated matrix and numerical computations fast and convenient! In this tutorial - Introduction: installing numpy and comparing plain python with numpy. - array: the basic data structure of numpy, the `array`. - array creation: how an `array` can be created. - dtype: the `dtype` object that describes the data type. - basic operations: basic arithmetic and the built-in unary operations. - Indexing, Slicing and Iterating: indexing, slicing and iteration. - methods: other useful built-in functions. Introduction installation
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
python
###Code
size = 10000000 # '10,000,000'
l, m = list(range(size)), list(range(size-1, -1, -1))
%%timeit
[x+y for x, y in zip(l, m)]
%%timeit
[x * 3 for x in l]
###Output
1.2 s ± 56.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
###Markdown
numpy
###Code
a, b = np.arange(size), np.arange(size-1, -1, -1)
%%timeit
a + b
%%timeit
a * 3
###Output
22.1 ms ± 1.28 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
array---The main object in numpy is the `homogeneous multidimensional array`, i.e. a `table of elements, all of the same type`. numpy's array class is called `ndarray`, and it is usually referred to by the alias `array`.
###Code
print(type(np.array([1, 2])))
###Output
<class 'numpy.ndarray'>
###Markdown
In numpy, dimensions are called `axes`.
###Code
l = [[1, 2, 3, 4], [5, 6, 7, 8]]
a = np.array(l)
print(a)
###Output
[[1 2 3 4]
[5 6 7 8]]
###Markdown
The `shape` attribute gives the dimensions of the `array`. Each number in the tuple is the size along that dimension. For example, a matrix with *n* rows and *m* columns has `shape` `(n, m)`.
###Code
print(a.shape)
###Output
(2, 4)
###Markdown
array creation--- - pass a python list or tuple (array_like) to the array function - use the arange function - use zeros and ones
###Code
np.array([1, 2])
np.arange(5)
np.zeros((5, 5))
np.ones((3, 6))
###Output
_____no_output_____
###Markdown
python list: a python list can hold several different data types in a single list.
###Code
l = [1, "2"]
print([type(each) for each in l])
###Output
[<class 'int'>, <class 'str'>]
###Markdown
numpy array: by contrast, a numpy array can hold only a single data type.
###Code
a = np.array([1, "2"])
print([type(each) for each in a])
###Output
[<class 'numpy.str_'>, <class 'numpy.str_'>]
###Markdown
dtype---an object describing the data type of the elements in an `array`. Reference: https://numpy.org/doc/stable/reference/arrays.dtypes.html
###Code
a = np.arange(5)
print(a.dtype.name)
a = np.array(["a", "b", "c", "문", "자", "열"])
print(a.dtype.name)
###Output
str32
###Markdown
basic operations---Many arithmetic operations on matrices can be used through the ordinary arithmetic operators (+, -, *, / and so on). Unary operations such as the sum of all elements of an `array` are also implemented on the `ndarray` class, so they are convenient to use.
###Code
a = np.array([[1, 1], [0, 1]])
b = np.array([[2, 0], [3, 4]])
print(a + b)
print(a * b)
###Output
[[2 0]
[0 4]]
###Markdown

###Code
print(a @ b)
print(a.sum()) # 1 +1 + 0 + 1 = 3
print(b.mean()) # (2 + 0 + 3 + 4) / 4 = 2.25
###Output
2.25
###Markdown
axis: by specifying the `axis` argument you can apply an operation along a particular axis of the `array`: - `axis=None`: over all elements - `axis=0`: collapse the rows (one result per column) - `axis=1`: collapse the columns (one result per row)
###Code
a = np.array([[1, 2], [3, 4]])
print(a)
a.sum() # 모든 원소의 합 (=a.sum(axis=None))
a.sum(axis=0) # 각 열에 대한 합
a.sum(axis=1) # 각 행에 대한 합
###Output
_____no_output_____
###Markdown
Indexing, Slicing and Iterating---A one-dimensional `array` can be indexed, sliced and iterated over just like an ordinary python list.
###Code
a = np.arange(10) ** 2
print(a)
a[2]
a[:2]
a[:-5]
a[::-1]
for i in a:
print(i**(1/2))
###Output
0.0
1.0
2.0
3.0
4.0
5.0
6.0
7.0
8.0
9.0
###Markdown
A multidimensional `array` has one index per axis. These indices are given as a tuple separated by commas.
###Code
b = np.arange(12).reshape(3, 4)
print(b)
b[2, 3]
b[1:,1:]
b[-1]
###Output
_____no_output_____
###Markdown
Iteration over a multidimensional `array` is done with respect to the first axis.
###Code
for index, row in enumerate(b):
print(f"{index}: {row}")
###Output
0: [0 1 2 3]
1: [4 5 6 7]
2: [ 8 9 10 11]
###Markdown
methods--- - array creation: `arange`, `linspace`, ... - manipulations: `reshape`, `transpose`, ... - questions: `all`, `any`, `where`, ... - ordering: `argmax`, `searchsorted`, ... - basic statistics: `std`, ... `arange`: returns an `array` of values from `start` to `stop` with the given `step` as the interval
###Code
np.arange(0, 15, step=1)
###Output
_____no_output_____
###Markdown
`linspace`: returns an `array` of `num` values evenly spaced between `start` and `stop`
###Code
np.linspace(0, 15, num=10)
###Output
_____no_output_____
###Markdown
`reshape`: returns a new `array` with the given `shape`, built from the data of the given `array`
###Code
a = np.arange(10)
print(a)
a.reshape(2, 5)
###Output
_____no_output_____
###Markdown
`transpose`: permutes the axes of an `array`. For a matrix (an `array` with two axes) it returns the transpose, i.e. rows and columns are swapped.
###Code
a = np.arange(10).reshape(2, 5)
print(a)
np.transpose(a) # == a.T
###Output
_____no_output_____
###Markdown
`all`: returns whether every value in the given `array` is `True`
###Code
a = np.arange(10)
print(a)
np.all(a < 10)
np.all(a < 5)
###Output
_____no_output_____
###Markdown
`any`: returns whether at least one value in the given `array` is `True`
###Code
a = np.arange(10)
print(a)
np.any(a==1)
np.any(a==-1)
###Output
_____no_output_____
###Markdown
`where`: returns the value from `x` or `y` depending on the `condition`; it is equivalent to the expression below```python[x if c else y for c, x, y in zip(condition, xs, ys)]```
###Code
a = np.arange(10)
print(a)
np.where(a<5, a, 10*a)
###Output
_____no_output_____
###Markdown
`argmax`: returns the index of the element with the largest value along the specified axis
###Code
a = np.array([10, 20, 30, 20, 10])
print(a)
np.argmax(a) # 2번 원소 (30)이 가장 큰 값을 가짐
###Output
_____no_output_____
###Markdown
`searchsorted`: performs a `binary search` to locate values quickly in a sorted `array`
###Code
a = np.array(range(100)) ** 2
print(a)
np.searchsorted(a, 1600) # 1600 이라는 값은 40번째 index 에 위치함
###Output
_____no_output_____
###Markdown
`std`: returns the standard deviation of the `array` along a given axis
###Code
np.array([1, 2, 3, 4]).std()
np.std(np.array([1, 2, 3, 4]))
###Output
_____no_output_____
###Markdown
NumPy is a library for operating on matrices
###Code
a = np.arange(15).reshape(3, 5)
print(a)
print("a size is {}".format(a.shape))
print('\n')
b = a.reshape(5,3)
print(b)
print('\n')
zero_matrix = np.zeros((2,3))
print(zero_matrix)
print('\n')
one_matrix = np.ones((4,2))
print(one_matrix)
###Output
[[ 0 1 2 3 4]
[ 5 6 7 8 9]
[10 11 12 13 14]]
a size is (3, 5)
[[ 0 1 2]
[ 3 4 5]
[ 6 7 8]
[ 9 10 11]
[12 13 14]]
[[0. 0. 0.]
[0. 0. 0.]]
[[1. 1.]
[1. 1.]
[1. 1.]
[1. 1.]]
###Markdown
Basic matrix operations:
###Code
A = np.array( [[1,1],
[0,1]] )
print("A")
print(A)
print('\n')
B = np.array( [[2,0],
[3,4]] )
print("B")
print(B)
print('\n')
print('B\'')
print(B.T)
print('\n')
print("A * B")
print(A * B)
print('\n')
print("A x B")
print(A.dot(B))
print('\n')
print("B x A")
print(B.dot(A))
print('\n')
print("A - B")
print(A-B)
print('\n')
print("A + B")
print(A+B)
print('\n')
print("A + 3")
print(A+3)
print('\n')
print("B^2")
print(B*2)
print('\n')
###Output
A
[[1 1]
[0 1]]
B
[[2 0]
[3 4]]
B'
[[2 3]
[0 4]]
A * B
[[2 0]
[0 4]]
A x B
[[5 4]
[3 4]]
B x A
[[2 2]
[3 7]]
A - B
[[-1 1]
[-3 -3]]
A + B
[[3 1]
[3 5]]
A + 3
[[4 4]
[3 4]]
B^2
[[4 0]
[6 8]]
###Markdown
Exercise: Using the information above and matrix multiplication, construct a "multiplication table". (One possible sketch is filled in below.)
###Code
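import numpy as np

# One possible sketch of a solution (added for illustration, not the original author's answer):
# a 10x10 multiplication table is the matrix product of a column vector and a row vector.
n = np.arange(1, 11)
table = n.reshape(10, 1).dot(n.reshape(1, 10))   # (10, 1) x (1, 10) -> 10x10 table
print(table)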
###Output
_____no_output_____
###Markdown
[Tutorial](http://www.naodongopen.com/272.html)
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Name and purpose of common `ndarray` attributes:

| Attribute | Purpose |
|-----------|---------|
| ndarray.flags | information about the memory layout of the array |
| ndarray.shape | the dimensions of the array |
| ndarray.strides | bytes to step in each dimension when traversing the array |
| ndarray.ndim | number of dimensions; in the Python world this is called the rank |
| ndarray.data | Python buffer object pointing to the start of the array's data |
| ndarray.size | total number of elements in the array |
| ndarray.itemsize | length of one array element in bytes |
| ndarray.nbytes | total bytes consumed by the elements of the array |
| ndarray.base | the base object, if the memory comes from some other object |
| ndarray.dtype | data type of the array elements |
| ndarray.T | transpose of the array |
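A brief sketch (added for illustration) printing several of these attributes; the commented values assume a C-ordered int64 array:

```python
import numpy as np

m = np.arange(12, dtype=np.int64).reshape(3, 4)
print(m.ndim, m.shape, m.size)        # 2 (3, 4) 12
print(m.dtype, m.itemsize, m.nbytes)  # int64 8 96
print(m.strides)                      # (32, 8): bytes to step along each axis
print(m.T.shape)                      # (4, 3)
```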
###Code
a = np.arange(9)
b = a.reshape(3, 3)
b.size
b.flags
b.T
a = np.array([(2, 3, 4), (5, 6, 7)], dtype=np.uint64)
a.astype(np.bool_)
np.uint64([1, 2, 3])
a = np.arange(24)
a
a = np.arange(10)
a.resize(2, 5)
a
a[1][::2]
a = np.arange(10, 0, -1)
# a.resize(2,5)
a[np.array([3, 3, 1, 8])]
a = np.arange(35)
a.resize(5, 7)
a
###Output
_____no_output_____
###Markdown
Evenly (linearly) spaced values with np.linspace; the parameter is the number of points
###Code
N = 8
y = np.zeros(N)
x1 = np.linspace(0, 10, N, endpoint=True)
x2 = np.linspace(0, 10, N, endpoint=False)
plt.plot(x1, y, 'o')
plt.plot(x2, y + 0.5, 'o')
plt.ylim([-.5, 1])
plt.show()
###Output
_____no_output_____
###Markdown
Values evenly spaced on a log scale (a geometric sequence); the parameter is the number of points, base=10 by default. np.logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None). ``The sequence starts at base ** start``
###Code
np.logspace(2.0, 3.0, num=4)
np.logspace(2.0, 3.0, num=4, endpoint=False)
np.logspace(2.0, 3.0, num=4, base=2.0)
np.logspace(0.1, 1, N, endpoint=True)
N = 10
y1 = np.logspace(0.1, 1, N, endpoint=True)
y2 = np.logspace(0.1, 1, N, endpoint=True)
x = np.arange(
0,
10,
)
plt.plot(x, y1, 'o')
plt.plot(x + 0.5, y2, '^')
# plt.ylim([0, 1])
plt.show()
###Output
_____no_output_____
###Markdown
Element data type and size
###Code
a = np.arange(5)
a.dtype = np.float16
a.dtype
a = np.arange(5)
np.float16(a)
np.dtype([("name", np.str_, 40), ("numitems", np.int64), ("price",
np.float64)])
###Output
_____no_output_____
###Markdown
Indexing: a single element
###Code
arr = np.array([[1, 2, 3, 4, 5], [4, 5, 6, 7, 8], [7, 8, 9, 10, 11]])
arr[0, 1]
###Output
_____no_output_____
###Markdown
For contiguous elements, use a slice
###Code
arr[0, 3:5]
###Output
_____no_output_____
###Markdown
For non-contiguous elements, use index arrays (a tuple of lists)
###Code
arr[([0, 1, 2], [1, 2, 3])]
arr[1:, (0, 2, 3)]
###Output
_____no_output_____
###Markdown
Boolean indexing
###Code
mask = np.array([1, 0, 1], dtype=np.bool_)  # np.bool was removed in newer NumPy; use np.bool_ or bool
arr
###Output
_____no_output_____
###Markdown
Changing the shape of an array
###Code
arr = np.arange(12)
arr.reshape((3, 4)) # 有返回值
arr.resize((3, 4)) # 不返回值,在原数组上该变
arr
###Output
_____no_output_____
###Markdown
Flattening an array with ravel
###Code
arr = np.arange(12).reshape(3, 4)
arr
arr.ravel() # 原数组不变
arr
###Output
_____no_output_____
###Markdown
Flatten row by row (row-major order)
###Code
arr.flatten()
###Output
_____no_output_____
###Markdown
Flatten column by column (column-major order)
###Code
arr.flatten('F')
###Output
_____no_output_____
###Markdown
Combining arrays:
- horizontal combination with hstack: np.hstack((arr1, arr2))
- vertical combination with vstack: np.vstack((arr1, arr2))
- horizontal combination with concatenate: np.concatenate((arr1, arr2), axis=1)
- vertical combination with concatenate: np.concatenate((arr1, arr2), axis=0)

Splitting arrays:
- horizontal split with hsplit: np.hsplit(arr1, 2)
- vertical split with vsplit: np.vsplit(arr, 2)
- horizontal split with split: np.split(arr, 2, axis=1)
- vertical split with split: np.split(arr, 2, axis=0)
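A short sketch of the combination and splitting calls listed above (the cell below demonstrates split in more detail):

```python
import numpy as np

arr1 = np.arange(6).reshape(2, 3)
arr2 = arr1 * 10

print(np.hstack((arr1, arr2)))                # horizontal: shape (2, 6)
print(np.vstack((arr1, arr2)))                # vertical:   shape (4, 3)
print(np.concatenate((arr1, arr2), axis=1))   # same result as hstack here
print(np.hsplit(np.hstack((arr1, arr2)), 2))  # split back into two (2, 3) blocks
```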
###Code
# 平均分割
x = np.arange(9.0)
np.split(x, 3)
x = np.arange(8.0)
np.split(x, [3, 5, 6, 10, 20]) # 分成6份,即使为空
###Output
_____no_output_____
###Markdown
Creating and combining matrices - 1
###Code
# Interpret the input as a matrix
#Equivalent to ``matrix(data, copy=False)
matr1 = np.mat("1 2 3;4 5 6;7 8 9")
matr1
###Output
_____no_output_____
###Markdown
2
###Code
matr2 = np.matrix([[123], [456], [789]])
matr2
###Output
_____no_output_____
###Markdown
Using bmat to assemble a matrix from blocks
###Code
A = np.mat('1 1; 1 1')
B = np.mat('2 2; 2 2')
C = np.mat('3 4; 5 6')
D = np.mat('7 8; 9 0')
np.bmat([[A, B], [C, D]])
np.c_[A, B]
np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
np.bmat('A,B; C,D')
###Output
_____no_output_____
###Markdown
Matrix arithmetic. Multiplying a matrix by a scalar:
###Code
matr1 = np.arange(0, 4).reshape((2, 2))
matr1
matr2 = np.arange(2, 6).reshape((2, 2))
matr2
matr1 * 3
###Output
_____no_output_____
###Markdown
Matrix addition and subtraction:
###Code
matr1 + matr2
###Output
_____no_output_____
###Markdown
Matrix multiplication:
###Code
matr1 * matr2
###Output
_____no_output_____
###Markdown
Element-wise multiplication of matrices:
###Code
np.multiply(matr1, matr2)
###Output
_____no_output_____
###Markdown
Matrix attributes:

| Attribute | Description |
|-----------|-------------|
| T | returns the transpose |
| H | returns the conjugate transpose |
| I | returns the inverse matrix |
| A | returns a view of the data as a 2-D array |

Getting to know ufuncs. **A universal function (ufunc) is a function that can operate on every element of an array.**
- Arithmetic: add (+), subtract (-), multiply (*), divide (/), power (**). Arithmetic between arrays is applied to the corresponding elements of each array, so the shapes must be the same.
- Comparisons: >, >=, <, <=, !=. A comparison returns a boolean array in which each element is the result of comparing the corresponding elements of the inputs.
- Logic: np.any acts as a logical "or" and np.all as a logical "and"; both return boolean values.

**Broadcasting in ufuncs:** broadcasting is how arithmetic is performed between arrays of different shapes. Four rules apply:
- all input arrays are aligned with the array whose shape is longest, and missing leading entries of the shape are padded with 1;
- the output shape is the maximum of the input shapes along each axis;
- an input array can take part in the computation only if, along every axis, its length equals the corresponding output length or is 1; otherwise an error is raised;
- when an input array has length 1 along some axis, the first (and only) set of values along that axis is reused across the whole computation along it.

Reading and writing files:
**- save stores data in binary format: np.save("../tmp/save_arr", arr)**
**- load reads data back from a binary file: np.load("../tmp/save_arr.npy")**
**- savez saves several arrays into one file: np.savez('../tmp/savez_arr', arr1, arr2)**
**- the extension may be omitted when saving, but must not be omitted when loading.**
**savetxt writes an array to a text file separated by some delimiter: np.savetxt("../tmp/arr.txt", arr, fmt="%d", delimiter=",")**
**loadtxt loads such a file into a two-dimensional array: np.loadtxt("../tmp/arr.txt", delimiter=",")**
**genfromtxt is aimed at structured arrays and missing data: np.genfromtxt("../tmp/arr.txt", delimiter=",")**
(A small save/load sketch follows below.)

Sorting. Direct sorting: sort is the most commonly used method: arr.sort(). sort also accepts an axis argument, so the data set can be sorted along a chosen axis: axis=1 sorts along each row, axis=0 along each column.
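A minimal sketch of the save/load round trip described above; the temporary directory and file names are placeholders of my own, not the paths used in the original notebook:

```python
import os
import tempfile
import numpy as np

arr = np.arange(6).reshape(2, 3)
tmpdir = tempfile.mkdtemp()              # placeholder location instead of "../tmp"

np.save(os.path.join(tmpdir, 'save_arr'), arr)           # binary .npy file; extension added automatically
loaded = np.load(os.path.join(tmpdir, 'save_arr.npy'))   # extension must be given when loading

np.savetxt(os.path.join(tmpdir, 'arr.txt'), arr, fmt='%d', delimiter=',')  # delimited text file
loaded_txt = np.loadtxt(os.path.join(tmpdir, 'arr.txt'), delimiter=',')

print(loaded)
print(loaded_txt)
```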
###Code
np.random.seed(42) #设置随机种子
arr = np.random.randint(1, 10, size=10) #生成随机数
print('创建的数组为:', arr)
arr.sort() #直接排序
print('排序后数组为:', arr)
arr = np.random.randint(1, 10, size=(3, 3)) #生成3行3列的随机数
print('创建的数组为:\n', arr)
arr.sort(axis=1) #沿着横轴排序
print('排序后数组为:\n', arr)
arr.sort(axis=0) #沿着纵轴排序
print('排序后数组为:\n', arr)
###Output
排序后数组为:
[[2 4 5]
[2 7 8]
[3 9 9]]
###Markdown
Indirect sorting. The argsort function returns the **indices** that would sort the array: arr.argsort(). The lexsort function sorts according to the last key passed in: np.lexsort((a, b, c))
###Code
arr = np.array([2, 3, 6, 8, 0, 7])
print('创建的数组为:\n', arr)
print('排序后数组为:\n', arr.argsort()) #返回值为重新排序值的下标
a = np.array([3, 2, 6, 4, 5])
b = np.array([50, 30, 40, 20, 10])
c = np.array([400, 300, 600, 100, 200])
d = np.lexsort((a, b, c)) #lexsort函数只接受一个参数,即(a,b,c),返回下标
d
a[d], b[d], c[d]
# 相当于
a[c.argsort()], b[c.argsort()], c[c.argsort()]
#多个键值排序是按照最后一个传入数据计算的
print('排序后数组为:\n', list(zip(a[d], b[d], c[d])))
###Output
排序后数组为:
[(4, 20, 100), (5, 10, 200), (2, 30, 300), (3, 50, 400), (6, 40, 600)]
###Markdown
Removing duplicates: the unique function
###Code
names = np.array(['小明', '小黄', '小花', '小明', '小花', '小兰', '小白'])
print('创建的数组为:', names)
print('去重后的数组为:', np.unique(names))
#跟np.unique等价的Python代码实现过程
print('去重后的数组为:', sorted(set(names)))
ints = np.array([1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10]) #创建数值型数据
print('创建的数组为:', ints)
print('去重后的数组为:', np.unique(ints))
###Output
创建的数组为: [ 1 2 3 4 4 5 6 6 7 8 8 9 10]
去重后的数组为: [ 1 2 3 4 5 6 7 8 9 10]
###Markdown
The tile function
###Code
arr = np.arange(5)
print('创建的数组为:', arr)
print('重复后数组为:', np.tile(arr, 3)) #对数组进行重复
arr.tolist() * 3
arr * 3
###Output
_____no_output_____
###Markdown
The repeat function
###Code
np.random.seed(42) #设置随机种子
arr = np.random.randint(0, 10, size=(3, 3))
print('创建的数组为:\n', arr)
print('重复后数组为:\n', arr.repeat(2, axis=0)) #按行进行元素重复
print('重复后数组为:\n', arr.repeat(2, axis=1)) #按列进行元素重复
###Output
重复后数组为:
[[6 6 3 3 7 7]
[4 4 6 6 9 9]
[2 2 6 6 7 7]]
###Markdown
**The main difference between these two functions is that tile repeats the whole array, while repeat repeats each individual element.** Commonly used statistical functions:

| Function | Description |
|----------|-------------|
| sum | sum of the array |
| mean | mean of the array |
| std | standard deviation of the array |
| var | variance of the array |
| min | minimum of the array |
| max | maximum of the array |
| argmin | index of the smallest element |
| argmax | index of the largest element |
| cumsum | cumulative sum of all elements |
| cumprod | cumulative product of all elements |
###Code
arr = np.arange(20).reshape(4,5)
print('创建的数组为:',arr)
print('数组的和为:',np.sum(arr)) #计算数组的和
print('数组横轴的和为:',arr.sum(axis = 0)) #沿着横轴计算求和
print('数组纵轴的和为:',arr.sum(axis = 1)) #沿着纵轴计算求和
print('数组的均值为:',np.mean(arr)) #计算数组均值
print('数组横轴的均值为:',arr.mean(axis = 0)) #沿着横轴计算数组均值
print('数组纵轴的均值为:',arr.mean(axis = 1)) #沿着纵轴计算数组均值
print('数组的标准差为:',np.std(arr)) #计算数组标准差
print('数组的方差为:',np.var(arr)) #计算数组方差
print('数组的最小值为:',np.min(arr)) #计算数组最小值
print('数组的最大值为:',np.max(arr)) #计算数组最大值
print('数组的最小元素为:',np.argmin(arr)) #返回数组最小元素的索引
print('数组的最大元素为:',np.argmax(arr)) #返回数组最大元素的索引
arr = np.arange(2,10)
print('创建的数组为:',arr)
print('数组元素的累计和为:',np.cumsum(arr)) #计算所有元素的累计和
print('数组元素的累计积为:',np.cumprod(arr)) #计算所有元素的累计积
###Output
创建的数组为: [2 3 4 5 6 7 8 9]
数组元素的累计和为: [ 2 5 9 14 20 27 35 44]
数组元素的累计积为: [ 2 6 24 120 720 5040 40320 362880]
###Markdown
Nothing
###Code
b = a[:, np.newaxis, :]
b.shape, b
a = np.arange(5)
a[:, np.newaxis].shape
a[np.newaxis, :].shape
a = np.arange(81).reshape(3, 3, 3, 3)
a
print(a[1, ..., 2], '\n----\n', a[1, :, :, 2])
a = np.arange(6)
a.shape = (2, 1, 3)
a.squeeze()
x = np.random.randint(20, size=60).reshape(3, 4, 5)
x
np.argmax(x, axis=0)
a = np.arange(0, 40, 10)
a = a[:, np.newaxis] # 转换a的维度(形状)
print(a)
np.array(([i * 3 for i in a.tolist()]))
a.tolist() * 3
a = [1, 2, 3]
a * 3
x = np.array(
[('Bob', 18, 2000.0), ('Tom', 23, 4000.0)],
dtype=[('name', np.str_), ('age', np.int_), ('incom', np.float_)])
x
x[:][0]
import numpy.ma as ma
ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
###Output
_____no_output_____
###Markdown
[Masked arrays](http://www.naodongopen.com/290.html)
###Code
a = ma.masked_array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])
a
a[~a.mask]
a.compressed()
x = np.ma.array(np.arange(5), mask=[0] * 2 + [1] * 3)
x
x.compressed()
###Output
_____no_output_____
###Markdown
[Modifying the mask](http://www.naodongopen.com/290.html) The genfromtxt function
###Code
from io import BytesIO
data = "1, 2, 3\n4, 5, 6"
#StringIO: read and write str in memory
from io import StringIO
f = StringIO()
print(f.write('hello'))
print(f.write(' '))
print(f.write('world!'))
f.getvalue()
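# Sketch added for illustration: parse the comma-separated string above with genfromtxt.
# StringIO wraps the string so genfromtxt can read it like a file.
arr = np.genfromtxt(StringIO(data), delimiter=",")
print(arr)   # expected: [[1. 2. 3.] [4. 5. 6.]]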
###Output
_____no_output_____
###Markdown
NUMPY Further reading * [Python](http://www.python.org). The official Python web site. * [Python tutorials](http://docs.python.org/2/tutorial). The official Python tutorials. * [Think Python](http://www.greenteapress.com/thinkpython). A free book on Python. Numpy - multidimensional data arrays
###Code
# what is this line all about?!? Answer coming soon.
%matplotlib inline
###Output
_____no_output_____
###Markdown
Why `numpy`? Performance and ease of use. The `numpy` package (module) is used in almost all numerical computation using Python. It is a package that provides high-performance vector, matrix and higher-dimensional data structures for Python. It is implemented in C and Fortran, so when calculations are vectorized (formulated with vectors and matrices), performance is very good. To use `numpy` we need to import the module, for example:
###Code
import numpy as np
import matplotlib.pyplot as pl
###Output
_____no_output_____
###Markdown
In the `numpy` package the terminology used for vectors, matrices and higher-dimensional data sets is *array*. Creating `numpy` arrays There are a number of ways to initialize new numpy arrays, for example from* a Python list or tuples* using functions that are dedicated to generating numpy arrays, such as `arange`, `linspace`, etc.* reading data from files From lists For example, to create new vector and matrix arrays from Python lists we can use the `numpy.array` function.
###Code
# a vector: the argument to the array function is a Python list
lst = [1,2.,3j,4]
v = np.array(lst)
v, v.dtype
# a matrix: the argument to the array function is a nested Python list
M = np.array([[1, 2, 3], [3, 4,3], [7,1,6]], dtype=np.float128)
M
###Output
_____no_output_____
###Markdown
The `v` and `M` objects are both of the type `ndarray` that the `numpy` module provides.
###Code
type(v), type(M)
###Output
_____no_output_____
###Markdown
The difference between the `v` and `M` arrays is only their shapes. We can get information about the shape of an array by using the `ndarray.shape` property.
###Code
M.shape
v.shape, v.dtype
###Output
_____no_output_____
###Markdown
The number of elements in the array is available through the `ndarray.size` property:
###Code
M.size
###Output
_____no_output_____
###Markdown
So far the `numpy.ndarray` looks awfully much like a Python list (or nested list). Why not simply use Python lists for computations instead of creating a new array type? There are several reasons:* Python lists are very general. They can contain any kind of object. They are dynamically typed. They do not support mathematical functions such as matrix and dot multiplications, etc. Implementing such functions for Python lists would not be very efficient because of the dynamic typing.* Numpy arrays are **statically typed** and **homogeneous**. The type of the elements is determined when the array is created.* Numpy arrays are memory efficient.* Because of the static typing, fast implementation of mathematical functions such as multiplication and addition of `numpy` arrays can be implemented in a compiled language (C and Fortran are used). Using the `dtype` (data type) property of an `ndarray`, we can see what type the data of an array has:
###Code
M.dtype, type(M)
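# Added illustration of the efficiency point above (timings vary by machine):
# a vectorized NumPy operation avoids the per-element overhead of a Python loop.
big_list = list(range(100000))
big_arr = np.arange(100000)
%timeit [i + 1 for i in big_list]
%timeit big_arr + 1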
###Output
_____no_output_____
###Markdown
We get an error if we try to assign a value of the wrong type to an element in a numpy array:
###Code
M[0,1] = 1.2
M[1,1]=2.1
M
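# The error mentioned above appears when the assigned value cannot be cast to
# the array's dtype, e.g. a string into this float array (illustrative sketch):
try:
    M[0, 0] = "hello"
except ValueError as err:
    print("ValueError:", err)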
###Output
_____no_output_____
###Markdown
If we want, we can explicitly define the type of the array data when we create it, using the `dtype` keyword argument:
###Code
M = np.array([[1, 2], [3, 4]], dtype=float)
M
M[1,1]=2.1
M
###Output
_____no_output_____
###Markdown
Common types that can be used with `dtype` are: `int`, `float`, `complex`, `bool`, `object`, etc. We can also explicitly define the bit size of the data types, for example: `int64`, `int16`, `float128`, `complex128`. Using array-generating functions For larger arrays it is impractical to initialize the data manually, using explicit Python lists. Instead we can use one of the many functions in `numpy` that generate arrays of different forms. Some of the more common are: arange
###Code
# create a range
x = np.arange(0, 10, 1) # arguments: start, stop, step
x
###Output
_____no_output_____
###Markdown
linspace and logspace
###Code
# using linspace, both end points ARE included
np.linspace(0, 10, 25)
np.logspace(0, 10, 10, base=np.pi)
###Output
_____no_output_____
###Markdown
zeros and ones
###Code
np.zeros((5,3))
np.ones((3,3))
###Output
_____no_output_____
###Markdown
Reading Data from Files Comma-separated values (CSV) A very common file format for data files is comma-separated values (CSV), or related formats such as TSV (tab-separated values). To read data from such a file into Numpy arrays we can use the `numpy.genfromtxt` function. For example,
###Code
!head data/ftir_data.csv
np.genfromtxt?
data = np.genfromtxt('data/ftir_data.csv', delimiter=';')
print (data.shape)
print (data)
fig, ax = pl.subplots(figsize=(10,3))
w = data[:,0]
a = data[:,1]
ax.plot(w, a, 'magenta')
ax.set_title('FTIR spectra')
ax.set_xlabel('wavenumber (cm$^{-1}$)')
ax.set_ylabel('absorbance (a.u)')
ax.set_xlim(4000,1500)
ax.set_ylim(-0.5,1.5)
fig.savefig('data/myfirst.jpg')
###Output
_____no_output_____
###Markdown
Using `numpy.savetxt` we can store a Numpy array to a file in CSV format:
###Code
M = np.random.rand(3,3)
M
np.savetxt("data/random-matrix.csv", M)
!cat data/random-matrix.csv
np.savetxt("data/random-matrix.csv", M, fmt='%.2e') # fmt specifies the format
!cat data/random-matrix.csv
###Output
_____no_output_____
###Markdown
Manipulating arrays Indexing We can index elements in an array using the square bracket and indices:
###Code
lst = [1.,2.,3.,4.]
v = np.array(lst)
print(v)
# v is a vector, and has only one dimension, taking one index
v[-2]
# M is a matrix, or a 2 dimensional array, taking two indices
M[0,2]
###Output
_____no_output_____
###Markdown
If we omit an index of a multidimensional array it returns the whole row (or, in general, a N-1 dimensional array)
###Code
M
M[1]
###Output
_____no_output_____
###Markdown
The same thing can be achieved with using `:` instead of an index:
###Code
M[1,:] # row 1
M[:,1] # column 1
###Output
_____no_output_____
###Markdown
We can assign new values to elements in an array using indexing:
###Code
M[:,:] = 0
M
# also works for rows and columns
M[1,:] = 2
M[:,2] = -1
M
###Output
_____no_output_____
###Markdown
Index slicing Index slicing is the technical name for the syntax `M[lower:upper:step]` to extract part of an array:
###Code
A = np.array([1,2,3,4,5])
A
A[1:3]
###Output
_____no_output_____
###Markdown
Array slices are *mutable*: if they are assigned a new value the original array from which the slice was extracted is modified:
###Code
A[1:3] = [-2,-3]
A
###Output
_____no_output_____
###Markdown
We can omit any of the three parameters in `M[lower:upper:step]`:
###Code
A[::] # lower, upper, step all take the default values
A[::2] # step is 2, lower and upper defaults to the beginning and end of the array
A[:3] # first three elements
A[3:] # elements from index 3
###Output
_____no_output_____
###Markdown
Negative indices counts from the end of the array (positive index from the begining):
###Code
A = np.array([1,2,3,4,5])
A[-1] # the last element in the array
A[-3:] # the last three elements
###Output
_____no_output_____
###Markdown
Index slicing works exactly the same way for multidimensional arrays:
###Code
m=1.
[n+m*15 for n in range(5)]
A = np.array([[np.sin((n/10.+(m/.100)**2)*np.pi) for n in range(5)] for m in range(5)])
A
# a block from the original array
A[1:4, 1:4]
# strides
A[:4:2, :4:2]
###Output
_____no_output_____
###Markdown
Fancy indexing Fancy indexing is the name for when an array or list is used in-place of an index:
###Code
row_indices = [1, 4]
A[row_indices]
col_indices = [1, 2, -1] # remember, index -1 means the last element
A[row_indices, col_indices]
###Output
_____no_output_____
###Markdown
We can also use index masks: if the index mask is a Numpy array with data type `bool`, then an element is selected (True) or not (False) depending on the value of the index mask at the position of that element:
###Code
B = np.arange(5)
B
row_mask = np.array([True, False, True, False, False])
B[row_mask]
# same thing
row_mask = np.array([1,0,0,1,0], dtype=bool)
B[row_mask]
mask = (w < 4000) & (w>3000)
print(mask)
data1 = data[mask]
fig, ax = pl.subplots(figsize=(10,3))
w1 = data1[:,0]
a1 = data1[:,1]
ax.plot(w1, a1, 'magenta')
ax.set_title('FTIR spectra')
ax.set_xlabel('wavenumber (cm$^{-1}$)')
ax.set_ylabel('absorbance (a.u)')
ax.set_xlim(4000,1500)
ax.set_ylim(-0.5,1.5)
w = data[:,0]
a = data[:,1]+.05
ax.plot(w, a, 'blue')
###Output
_____no_output_____
###Markdown
This feature is very useful to conditionally select elements from an array, using for example comparison operators:
###Code
x = np.arange(0, 10, 0.5)
x
mask = (5 < x) * (x < 7.5)
mask
x[mask]
###Output
_____no_output_____
###Markdown
Functions for extracting data from arrays and creating arrays where The index mask can be converted to position index using the `where` function
###Code
indices = np.where(mask)
indices
x[indices] # this indexing is equivalent to the fancy indexing x[mask]
###Output
_____no_output_____
###Markdown
Functions for extracting data from arrays and creating arrays diag With the diag function we can also extract the diagonal and subdiagonals of an array:
###Code
np.diag(A)
np.diag(A, -1)
###Output
_____no_output_____
###Markdown
Functions for extracting data from arrays and creating arrays choose Constructs an array by picking elements from several arrays:
###Code
which = [1, 0, 1, 0]
choices = [[-2,-2,-2,-2], [5,5,5,5]]
np.choose(which, choices)
###Output
_____no_output_____
###Markdown
Linear algebra Vectorizing code is the key to writing efficient numerical calculation with Python/Numpy.That means that as much as possible of a program should be formulated in terms of matrix and vector operations, like matrix-matrix multiplication. Scalar-array operations We can use the usual arithmetic operators to multiply, add, subtract, and divide arrays with scalar numbers.
###Code
v1 = np.arange(0, 5)
v1 * 2
v1 + 2
A * 2, A + 2
###Output
_____no_output_____
###Markdown
Element-wise array-array operations When we add, subtract, multiply and divide arrays with each other, the default behaviour is **element-wise** operations:
###Code
A * A # element-wise multiplication
v1 * v1
###Output
_____no_output_____
###Markdown
If we multiply arrays with compatible shapes, we get an element-wise multiplication of each row:
###Code
A.shape, v1.shape
A * v1
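# For an actual matrix product (as opposed to the element-wise products above),
# np.dot can be used -- a small illustrative addition:
np.dot(A, v1) # each row of A dotted with v1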
###Output
_____no_output_____
###Markdown
Data processing Often it is useful to store datasets in Numpy arrays. Numpy provides a number of functions to calculate statistics of datasets in arrays. For example, let's calculate some properties data from the ftir dataset used above.
###Code
# reminder, the dataset is stored in the data variable:
data.shape
###Output
_____no_output_____
###Markdown
mean
###Code
# the absorbance is in column 1
data[:,1].mean()
###Output
_____no_output_____
###Markdown
The mean absorbance is roughly 0.09. standard deviations and variance
###Code
data[:,1].std(), data[:,1].var()
###Output
_____no_output_____
###Markdown
min and max
###Code
# lowest absorbance
data[:,1].min()
# highest absorbance
data[:,1].max()
###Output
_____no_output_____
###Markdown
sum, prod, and trace
###Code
d = np.arange(0, 10)
d
# sum up all elements
d.sum()
# product of all elements
(d+1).prod()
# cummulative sum
d.cumsum()
# cummulative product
(d+1).cumprod()
# same as: diag(A).sum()
np.trace(A)
###Output
_____no_output_____
###Markdown
Computations on subsets of arrays We can compute with subsets of the data in an array using indexing, fancy indexing, and the other methods of extracting data from an array (described above). If we are interested in the average absorbance only in a particular range of frequencies, then we can create an index mask and use it to select only the data for that range:
###Code
np.unique(data[:,0]) # frequency column
mask_oh = data[:,0] > 3000
# the absorbance data is in column 1
np.mean(data[mask_oh,1])
###Output
_____no_output_____
###Markdown
With these tools we have very powerful data processing capabilities at our disposal.
###Code
fig, ax = pl.subplots(figsize=(12,6))
w = data[mask_oh,0]
a = data[mask_oh,1]
ax.plot(w, a)
ax.set_title('FTIR spectra in OH region')
ax.set_xlabel('wavenumber (cm$^{-1}$)')
ax.set_ylabel('absorbance (a.u)');
###Output
_____no_output_____
###Markdown
Reshaping, resizing and stacking arrays The shape of a Numpy array can be modified without copying the underlying data, which makes it a fast operation even for large arrays.
###Code
A
n, m = A.shape
B = A.reshape((1,n*m))
B
B[0,0:5] = 5 # modify the array
B
A # and the original variable is also changed. B is only a different view of the same data
###Output
_____no_output_____
###Markdown
We can also use the function `flatten` to make a higher-dimensional array into a vector. But this function creates a copy of the data.
###Code
B = A.flatten()
B
B[0:5] = 10
B
A # now A has not changed, because B's data is a copy of A's, not referring to the same data
###Output
_____no_output_____
###Markdown
Adding a new dimension: newaxis With `newaxis`, we can insert new dimensions in an array, for example converting a vector to a column or row matrix:
###Code
v = np.array([1,2,3])
np.shape(v)
# make a column matrix of the vector v
v[:, np.newaxis]
# column matrix
v[:,np.newaxis].shape
# row matrix
v[np.newaxis,:].shape
###Output
_____no_output_____
###Markdown
Stacking and repeating arrays Using function `repeat`, `tile`, `vstack`, `hstack`, and `concatenate` we can create larger vectors and matrices from smaller ones: tile and repeat
###Code
a = np.array([[1, 2], [3, 4]])
# repeat each element 3 times
np.repeat(a, 3)
# tile the matrix 3 times
np.tile(a, 3)
###Output
_____no_output_____
###Markdown
concatenate
###Code
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)
np.concatenate((a, b.T), axis=1)
###Output
_____no_output_____
###Markdown
hstack and vstack
###Code
np.vstack((a,b))
np.hstack((a,b.T))
###Output
_____no_output_____
###Markdown
Copy and "deep copy" To achieve high performance, assignments in Python usually do not copy the underlaying objects. This is important for example when objects are passed between functions, to avoid an excessive amount of memory copying when it is not necessary (techincal term: pass by reference).
###Code
A = np.array([[1, 2], [3, 4]])
A
# now B is referring to the same array data as A
B = A
# changing B affects A
B[0,0] = 10
B
A
###Output
_____no_output_____
###Markdown
If we want to avoid this behavior, so that when we get a new completely independent object `B` copied from `A`, then we need to do a so-called "deep copy" using the function `copy`:
###Code
B = np.copy(A)
# now, if we modify B, A is not affected
B[0,0] = -5
B
A
###Output
_____no_output_____
###Markdown
Iterating over array elements Generally, we want to avoid iterating over the elements of arrays whenever we can (at all costs). The reason is that in an interpreted language like Python (or MATLAB), iterations are really slow compared to vectorized operations. However, sometimes iterations are unavoidable. For such cases, the Python `for` loop is the most convenient way to iterate over an array:
###Code
v = np.array([1,2,3,4])
for element in v:
print(element)
M = np.array([[1,2], [3,4]])
for row in M:
print("row", row)
for element in row:
print(element)
###Output
_____no_output_____
###Markdown
When we need to iterate over each element of an array and modify its elements, it is convenient to use the `enumerate` function to obtain both the element and its index in the `for` loop:
###Code
for row_idx, row in enumerate(M):
print("row_idx", row_idx, "row", row)
for col_idx, element in enumerate(row):
print("col_idx", col_idx, "element", element)
# update the matrix M: square each element
M[row_idx, col_idx] = element ** 2
# each element in M is now squared
M
###Output
_____no_output_____
###Markdown
Performance
###Code
%%timeit
M = np.array([[1,2], [3,4]])
for row_idx, row in enumerate(M):
for col_idx, element in enumerate(row):
# update the matrix M: square each element
M[row_idx, col_idx] = element ** 2
M
M = np.array([[1,2], [3,4]])
%timeit M*M
M
###Output
_____no_output_____
###Markdown
Using arrays in conditions When using arrays in conditions, for example in `if` statements and other boolean expressions, one needs to use `any` or `all`, which requires that any or all elements in the array evaluate to `True`:
###Code
M
if (M > 5).any():
print("at least one element in M is larger than 5")
else:
print("no element in M is larger than 5")
if (M > 5).all():
print("all elements in M are larger than 5")
else:
print("all elements in M are not larger than 5")
###Output
_____no_output_____
###Markdown
numpy ---**author:** 刘志强**mail:** [email protected]**time:** 2019/4/2*Learn from each other, improve together*--- (1) ndarray (N-dimensional array)
###Code
# ndarray is the core of NumPy.
###Output
_____no_output_____
###Markdown
import numpy as np define a 1-D arraya1=np.array([1,2,3])a2=np.array([[1,2,3]])print a1print a2 check the attribute of 'a'print 'data framework is:',type(a1)print 'the data type for a1 is:',a1.dtypeprint 'the rank is: ',a1.ndimprint 'the size is:',a1.sizeprint 'the shape is:',a1.shapeprint 'the shape is:',a2.shape define a 2-D arrayb1=np.array([[1.2,1.4],[0.3,0.5]],dtype=complex)b2=np.array([['a','b'],['c','d']])print b1print b2 check the attribute of 'b'print 'the data type for b1 is:',b1.dtypeprint 'the data type for b2 is:',b2.dtypeprint 'the rank is: ',b1.ndimprint 'the size is:',b1.sizeprint 'the shape is:',b1.shape create arrayprint np.zeros((3,3))print np.ones((3,3))print np.random.random((3,3))print '--------------------------'print np.arange(4)print np.arange(4,10)print np.arange(1,12,3)print np.arange(0,6,0.6)print np.arange(0,12).reshape(3,4)print '--------------------------'print np.linspace(0,10,5)
###Code
## (2) Algebraic matrix operations
###Output
_____no_output_____
###Markdown
1) In Matlab, matrix operations are convenient: matrix multiplication and division use * and /, element-wise operations use .* and ./, the inverse is inv(), and the pseudo-inverse is pinv(). 2) In Fortran, matrix multiplication is matmul(,), transpose is transpose(), and the inverse requires `use imsl` or another library. 3) In Python/NumPy, element-wise multiplication and division are * and /, and matrix multiplication is np.dot(,)
###Code
######element operation#######
a1=np.arange(4,8)
a2=np.arange(4)
print a1
print a2
print '------------------'
print a1+4
print a1*2
print a1+a2
print a1*a2
print '------------------'
print a1*np.sin(a2)
A=np.arange(0,9).reshape(3,3)
B=np.array([[1,2,3],[2,5,9],[9,4,2]])
print A*B
######matrix operation######
print np.dot(A,B)
print A.dot(B)
print np.dot(B,A)
print '---------------------'
print np.linalg.inv(B)
e,v=np.linalg.eig(B)
print '---------------------'
print e
print v
print np.diag(e)
print '---------------------'
print np.linalg.pinv(A)
###Output
[[20 13 13]
[56 46 55]
[92 79 97]]
[[20 13 13]
[56 46 55]
[92 79 97]]
[[ 24 30 36]
[ 69 85 101]
[ 24 39 54]]
---------------------
[[-1.52941176 0.47058824 0.17647059]
[ 4.52941176 -1.47058824 -0.17647059]
[-2.17647059 0.82352941 0.05882353]]
---------------------
[12.20928637 -0.36190391 -3.84738246]
[[-0.28829306 -0.26117461 -0.15593447]
[-0.77757905 0.85368797 -0.68685997]
[-0.55880044 -0.4505604 0.70986465]]
[[12.20928637 0. 0. ]
[ 0. -0.36190391 0. ]
[ 0. 0. -3.84738246]]
---------------------
[[-5.55555556e-01 -1.66666667e-01 2.22222222e-01]
[-5.55555556e-02 1.83880688e-16 5.55555556e-02]
[ 4.44444444e-01 1.66666667e-01 -1.11111111e-01]]
###Markdown
Aggregation functions: sum, min, max, mean, etc.
###Code
######aggregation function######
print A
print A.sum()
print A.min()
print A.max()
print a.mean()
print a.std()
###Output
[[0 1 2]
[3 4 5]
[6 7 8]]
36
0
8
2.0
0.816496580927726
###Markdown
(3) Indexing, slicing and iteration
###Code
######index######
a=np.arange(10,16)
print a
print a[0],a[1],a[-1],a[-2]
print a[[1,3,4]]
######section######
A=np.arange(10,19).reshape((3,3))
print a[1:5]
print a[1:5:2]
print a[::2]
print a[:5:2]
print a[:5:]
print '-------------------'
print A
print A[0,:]
print A[0:2,0:2]
print A[[0,2],0:2]
######iteration######
for i in a:
print i
for row in A:
print row
for item in A.flat:
print item
print '-------------------'
print np.apply_along_axis(np.mean,axis=0,arr=A)
print np.apply_along_axis(np.mean,axis=1,arr=A)
print '-------------------'
def f(x):
return x/2
print np.apply_along_axis(f,axis=1,arr=A)
print np.apply_along_axis(f,axis=0,arr=A)
###Output
10
11
12
13
14
15
[10 11 12]
[13 14 15]
[16 17 18]
10
11
12
13
14
15
16
17
18
-------------------
[13. 14. 15.]
[11. 14. 17.]
-------------------
[[5 5 6]
[6 7 7]
[8 8 9]]
[[5 5 6]
[6 7 7]
[8 8 9]]
###Markdown
(4) Conditions
###Code
A=np.random.random((4,4))
print A<0.5
print A[A<0.5]
###Output
[[False False False False]
[False False True False]
[ True True True False]
[ True True False True]]
[0.11192502 0.03210371 0.37951122 0.35511741 0.15791806 0.18274546
0.17341374]
###Markdown
(5) Changing shape
###Code
a=np.random.random(12)
print 'initial vector "a"',a
A=a.reshape(3,4)
a.shape=(3,4)
print '----------------'
print 'tansform into 3*4 matrix',a
print '----------------'
print 'back to vector',a.ravel()
a.shape=(12)
print '----------------'
print A.transpose()
###Output
initial vector "a" [0.19356155 0.21455872 0.44775613 0.20349479 0.35010934 0.03703813
0.83740332 0.32814506 0.24217409 0.52875548 0.95675934 0.44882848]
----------------
tansform into 3*4 matrix [[0.19356155 0.21455872 0.44775613 0.20349479]
[0.35010934 0.03703813 0.83740332 0.32814506]
[0.24217409 0.52875548 0.95675934 0.44882848]]
----------------
back to vector [0.19356155 0.21455872 0.44775613 0.20349479 0.35010934 0.03703813
0.83740332 0.32814506 0.24217409 0.52875548 0.95675934 0.44882848]
----------------
[[0.19356155 0.35010934 0.24217409]
[0.21455872 0.03703813 0.52875548]
[0.44775613 0.83740332 0.95675934]
[0.20349479 0.32814506 0.44882848]]
###Markdown
(6) Joining and splitting
###Code
######joint######
A=np.ones((3,3))
B=np.zeros((3,3))
print np.vstack((A,B))
print np.hstack((A,B))
a=np.array([0,1,2])
b=np.array([3,4,5])
c=np.array([6,7,8])
print np.column_stack((a,b,c))
print np.row_stack((a,b,c))
######split######
A=np.arange(16).reshape((4,4))
[B,C]=np.hsplit(A,2)
print A
print B
print C
print '---------------'
[B,C]=np.vsplit(A,2)
print B
print C
[A1,A2,A3]=np.split(A,[1,3],axis=1)
print A1
print A2
print A3
###Output
[[ 0]
[ 4]
[ 8]
[12]]
[[ 1 2]
[ 5 6]
[ 9 10]
[13 14]]
[[ 3]
[ 7]
[11]
[15]]
###Markdown
(7) Views and copies
###Code
a=np.array([1,2,3,4])
b=a
print b
a[2]=0
print b
a=np.array([1,2,3,4])
b=a.copy()
print b
a[2]=0
print b
###Output
[1 2 3 4]
[1 2 0 4]
[1 2 3 4]
[1 2 3 4]
###Markdown
(8) NumPy broadcasting
###Code
A=np.arange(16).reshape(4,4)
b=np.arange(4)
print A+b
m=np.arange(6).reshape(3,1,2)
n=np.arange(6).reshape(3,2,1)
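# added note: shapes (3,1,2) and (3,2,1) are broadcast together, each size-1 axis
# is stretched, so m+n has shape (3,2,2)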
print m+n
###Output
[[ 0 2 4 6]
[ 4 6 8 10]
[ 8 10 12 14]
[12 14 16 18]]
[[[ 0 1]
[ 1 2]]
[[ 4 5]
[ 5 6]]
[[ 8 9]
[ 9 10]]]
###Markdown
(9) Structured arrays
###Code
s=np.array([(1,'first',0.5,1+2j),(2,'second',1.1,1-1j)],dtype=('i2,a6,f4,c8'))
print np.array([(1,'first',0.5,1+2j),(2,'second',1.1,1-1j)],dtype=('i2,a6,f4,c8'))
print np.array([(1,'first',0.5,1+2j),(2,'second',1.1,1-1j)],dtype=('int16,a6,float32,complex64'))
print s[1]
print s['f1']
print s['f2']
###Output
(2, 'second', 1.1, 1.-1.j)
['first' 'second']
[0.5 1.1]
###Markdown
Numpy, a fundamental scientific computing library * `import numpy as np` ndarray * $ c=a^2+b^3 $
###Code
import numpy as np
def npSum():
a = np.array([0, 1, 2, 3, 4])
b = np.array([9, 8, 7, 6, 5])
c = a**2 + b**3
return c
print(npSum())
###Output
[729 513 347 225 141]
###Markdown
- axis: a dimension along which the data is stored
- rank: the number of axes
###Code
a = np.array([[0, 1, 2, 3, 4],
[9, 8, 7, 6, 5]])
print(a.ndim, a.shape, a.size, a.dtype, a.itemsize)
###Output
2 (2, 5) 10 int32 4
###Markdown
Ways to create an ndarray
1. From Python lists, tuples and similar types
   * `x = np.array(list/tuple)`
   * `x = np.array(list/tuple,dtype=np.float32)`
2. Using NumPy functions such as arange, ones, zeros, full, eye (identity matrix), etc.; the last four default to float
3. Using other functions
   * np.linspace() fills evenly spaced values between a start and a stop value
   * np.concatenate() joins two or more arrays into a new array
###Code
np.arange(10)
np.ones((3,6))
np.zeros((3,6),dtype=np.int32)
np.eye(5,dtype=int) # default is float
x = np.ones((2, 3, 4))
print(x)
x.shape
a = np.linspace(1,10,4)
a
b = np.linspace(1,10,4,endpoint=False) # endpoint controls whether the stop value is included
b
c = np.concatenate((a,b))
c
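# np.full, mentioned in the list above but not demonstrated (illustrative):
np.full((2, 3), 7) # a 2x3 array filled with the constant 7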
###Output
_____no_output_____
###Markdown
Transforming ndarrays
* Changing dimensions

| Method | Description |
|:-|:-|
| .reshape(shape) | Returns an array with the given shape without changing the elements; the original array is unchanged |
| .resize(shape) | Same as .reshape(), but modifies the original array |
| .swapaxes(ax1,ax2) | Swaps two of the array's n dimensions |
| .flatten() | Flattens the array into a 1-D copy; the original array is unchanged |

* Converting element types
  * .astype()
  * .tolist()
###Code
a = np.ones((2, 3, 4), dtype=np.int32)
a
a.reshape(3, 8)
a # a is unchanged
a.resize(3, 8)
a
a # a has changed
a.flatten()
a
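# swapaxes and astype from the table above, added here for illustration:
a.swapaxes(0, 1).shape # exchange axes 0 and 1 -> (8, 3)
a.astype(np.float64).dtype # element type conversion -> dtype('float64')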
###Output
_____no_output_____
###Markdown
Array indexing and slicing* Indexing and slicing a 1-D array works like a Python list
###Code
# indexing
a = np.array([9, 8, 7, 6, 5])
print(a[2])
# slicing
a[1 : 4 : 2]
a = np.arange(24).reshape((2, 3, 4))
a
a[1, 2, 3]
a[0, 1, 2]
a[-1, -2, -3]
# : selects an entire dimension
a[:, 1, -3]
a[:, 1:3, :]
# :: steps with a stride
a[:, :,::2]
###Output
_____no_output_____
###Markdown
Operations on ndarrays
1. Operations between an array and a scalar
2. Element-wise functions

Unary functions

|Function|Description|
|:---|:---|
|np.abs(x) np.fabs(x)|absolute value|
|np.sqrt(x)|square root|
|np.square(x)|square|
|np.log(x) np.log10(x) np.log2(x)|natural, base-10 and base-2 logarithms|
|np.ceil(x) np.floor(x)|ceiling or floor of each element|
|np.rint(x)|round to the nearest integer|
|np.modf(x)|fractional and integral parts of each element, returned as two separate arrays|
|np.cos(x) ...|trigonometric functions|
|np.exp(x)|exponential|
|np.sign(x)|sign of each element: 1 (+), 0, -1 (-)|

Binary functions

|Function|Description|
|:---|:---|
|+ - * **|element-wise arithmetic between two arrays|
|np.maximum(x,y) np.fmax() np.minimum(x,y) np.fmin()|element-wise maximum / minimum|
|np.mod(x,y)|element-wise modulo|
|np.copysign(x,y)|copy the sign of each element of y to the corresponding element of x|
|< > <= >= == !=|arithmetic comparisons, producing boolean arrays|
###Code
a
a.mean()
# mean
a = a/a.mean()
a
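# a few of the element-wise (ufunc) functions from the tables above, for illustration:
np.modf(a) # fractional and integer parts as two separate arrays
np.sign(a - 1) # sign of each element: -1, 0 or 1
np.maximum(a, 0.5) # element-wise maximum against 0.5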
###Output
_____no_output_____
###Markdown
NumPy Contents- [NumPy](NumPy) - [Overview](Overview) - [Introduction to NumPy](Introduction-to-NumPy) - [NumPy Arrays](NumPy-Arrays) - [Operations on Arrays](Operations-on-Arrays) - [Additional Functionality](Additional-Functionality) - [Exercises](Exercises) - [Solutions](Solutions) > “Let’s be clear: the work of science has nothing whatever to do with consensus. Consensus is the business of politics. Science, on the contrary, requires only one investigator who happens to be right, which means that he or she has results that are verifiable by reference to the real world. In science consensus is irrelevant. What is relevant is reproducible results.” – Michael Crichton Overview[NumPy](https://en.wikipedia.org/wiki/NumPy) is a first-rate library for numerical programming- Widely used in academia, finance and industry - Mature, fast, stable and under continuous development In this lecture we introduce NumPy arrays and the fundamental array processing operations provided by NumPy References- [The official NumPy documentation](http://docs.scipy.org/doc/numpy/reference/) Introduction to NumPyThe essential problem that NumPy solves is fast array processingFor example, suppose we want to create an array of 1 million random draws from a uniform distribution and compute the meanIf we did this in pure Python it would be orders of magnitude slower than C or FortranThis is because- Loops in Python over Python data types like lists carry significant overhead - C and Fortran code contains a lot of type information that can be used for optimization - Various optimizations can be carried out during compilation, when the compiler sees the instructions as a whole However, for a task like the one described above there’s no need to switch back to C or FortranInstead we can use NumPy, where the instructions look like this:
###Code
import numpy as np
x = np.random.uniform(0, 1, size=1000000)
x.mean()
###Output
_____no_output_____
###Markdown
The operations of creating the array and computing its mean are both passed out to carefully optimized machine code compiled from CMore generally, NumPy sends operations *in batches* to optimized C and Fortran codeThis is similar in spirit to Matlab, which provides an interface to fast Fortran routines A Comment on VectorizationNumPy is great for operations that are naturally *vectorized*Vectorized operations are precompiled routines that can be sent in batches, like- matrix multiplication and other linear algebra routines - generating a vector of random numbers - applying a fixed transformation (e.g., sine or cosine) to an entire array In a [later lecture](https://lectures.quantecon.org/py/numba.html) we’ll discuss code that isn’t easy to vectorize and how such routines can also be optimized NumPy ArraysThe most important thing that NumPy defines is an array data type formally called a [numpy.ndarray](http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html)NumPy arrays power a large proportion of the scientific Python ecosystemTo create a NumPy array containing only zeros we use [np.zeros](http://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.htmlnumpy.zeros)
###Code
a = np.zeros(3)
a
type(a)
###Output
_____no_output_____
###Markdown
NumPy arrays are somewhat like native Python lists, except that- Data *must be homogeneous* (all elements of the same type) - These types must be one of the data types (`dtypes`) provided by NumPy The most important of these dtypes are:- float64: 64 bit floating point number - int64: 64 bit integer - bool: 8 bit True or False There are also dtypes to represent complex numbers, unsigned integers, etcOn modern machines, the default dtype for arrays is `float64`
###Code
a = np.zeros(3)
type(a[0])
###Output
_____no_output_____
###Markdown
If we want to use integers we can specify as follows:
###Code
a = np.zeros(3, dtype=int)
type(a[0])
###Output
_____no_output_____
###Markdown
Shape and DimensionConsider the following assignment
###Code
z = np.zeros(10)
###Output
_____no_output_____
###Markdown
Here `z` is a *flat* array with no dimension — neither row nor column vectorThe dimension is recorded in the `shape` attribute, which is a tuple
###Code
z.shape
###Output
_____no_output_____
###Markdown
Here the shape tuple has only one element, which is the length of the array (tuples with one element end with a comma)To give it dimension, we can change the `shape` attribute
###Code
z.shape = (10, 1)
z
z = np.zeros(4)
z.shape = (2, 2)
z
###Output
_____no_output_____
###Markdown
In the last case, to make the 2 by 2 array, we could also pass a tuple to the `zeros()` function, as in `z = np.zeros((2, 2))` Creating Arrays As we’ve seen, the `np.zeros` function creates an array of zeros. You can probably guess what `np.ones` creates. Related is `np.empty`, which creates arrays in memory that can later be populated with data
###Code
z = np.empty(3)
z
###Output
_____no_output_____
###Markdown
The numbers you see here are garbage values (Python allocates 3 contiguous 64 bit pieces of memory, and the existing contents of those memory slots are interpreted as `float64` values). To set up a grid of evenly spaced numbers use `np.linspace`
###Code
z = np.linspace(2, 4, 5) # From 2 to 4, with 5 elements
###Output
_____no_output_____
###Markdown
To create an identity matrix use either `np.identity` or `np.eye`
###Code
z = np.identity(2)
z
###Output
_____no_output_____
###Markdown
In addition, NumPy arrays can be created from Python lists, tuples, etc. using `np.array`
###Code
z = np.array([10, 20]) # ndarray from Python list
z
type(z)
z = np.array((10, 20), dtype=float) # Here 'float' is equivalent to 'np.float64'
z
z = np.array([[1, 2], [3, 4]]) # 2D array from a list of lists
z
###Output
_____no_output_____
###Markdown
See also `np.asarray`, which performs a similar function, but does not make a distinct copy of data already in a NumPy array
###Code
na = np.linspace(10, 20, 2)
na is np.asarray(na) # Does not copy NumPy arrays
na is np.array(na) # Does make a new copy --- perhaps unnecessarily
###Output
_____no_output_____
###Markdown
To read in the array data from a text file containing numeric data use `np.loadtxt`or `np.genfromtxt`—see [the documentation](http://docs.scipy.org/doc/numpy/reference/routines.io.html) for details Array IndexingFor a flat array, indexing is the same as Python sequences:
###Code
z = np.linspace(1, 2, 5)
z
z[0]
z[0:2] # Two elements, starting at element 0
z[-1]
###Output
_____no_output_____
###Markdown
For 2D arrays the index syntax is as follows:
###Code
z = np.array([[1, 2], [3, 4]])
z
z[0, 0]
z[0, 1]
###Output
_____no_output_____
###Markdown
And so on. Note that indices are still zero-based, to maintain compatibility with Python sequences. Columns and rows can be extracted as follows
###Code
z[0, :]
z[:, 1]
###Output
_____no_output_____
###Markdown
NumPy arrays of integers can also be used to extract elements
###Code
z = np.linspace(2, 4, 5)
z
indices = np.array((0, 2, 3))
z[indices]
###Output
_____no_output_____
###Markdown
Finally, an array of `dtype bool` can be used to extract elements
###Code
z
d = np.array([0, 1, 1, 0, 0], dtype=bool)
d
z[d]
###Output
_____no_output_____
###Markdown
We’ll see why this is useful below. An aside: all elements of an array can be set equal to one number using slice notation
###Code
z = np.empty(3)
z
z[:] = 42
z
###Output
_____no_output_____
###Markdown
Array MethodsArrays have useful methods, all of which are carefully optimized
###Code
a = np.array((4, 3, 2, 1))
a
a.sort() # Sorts a in place
a
a.sum() # Sum
a.mean() # Mean
a.max() # Max
a.argmax() # Returns the index of the maximal element
a.cumsum() # Cumulative sum of the elements of a
a.cumprod() # Cumulative product of the elements of a
a.var() # Variance
a.std() # Standard deviation
a.shape = (2, 2)
a.T # Equivalent to a.transpose()
###Output
_____no_output_____
###Markdown
Another method worth knowing is `searchsorted()`. If `z` is a nondecreasing array, then `z.searchsorted(a)` returns the index of the first element of `z` that is `>= a`
###Code
z = np.linspace(2, 4, 5)
z
z.searchsorted(2.2)
###Output
_____no_output_____
###Markdown
Many of the methods discussed above have equivalent functions in the NumPy namespace
###Code
a = np.array((4, 3, 2, 1))
np.sum(a)
np.mean(a)
###Output
_____no_output_____
###Markdown
Operations on Arrays Arithmetic OperationsThe operators `+`, `-`, `*`, `/` and `**` all act *elementwise* on arrays
###Code
a = np.array([1, 2, 3, 4])
b = np.array([5, 6, 7, 8])
a + b
a * b
###Output
_____no_output_____
###Markdown
We can add a scalar to each element as follows
###Code
a + 10
###Output
_____no_output_____
###Markdown
Scalar multiplication is similar
###Code
a * 10
###Output
_____no_output_____
###Markdown
The two dimensional arrays follow the same general rules
###Code
A = np.ones((2, 2))
B = np.ones((2, 2))
A + B
A + 10
A * B
###Output
_____no_output_____
###Markdown
In particular, `A * B` is *not* the matrix product, it is an element-wise product Matrix Multiplication With Anaconda’s scientific Python package based around Python 3.5 and above, one can use the `@` symbol for matrix multiplication, as follows:
###Code
A = np.ones((2, 2))
B = np.ones((2, 2))
A @ B
###Output
_____no_output_____
###Markdown
(For older versions of Python and NumPy you need to use the [np.dot](http://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) function)We can also use `@` to take the inner product of two flat arrays
###Code
A = np.array((1, 2))
B = np.array((10, 20))
A @ B
###Output
_____no_output_____
###Markdown
In fact, we can use `@` when one element is a Python list or tuple
###Code
A = np.array(((1, 2), (3, 4)))
A
A @ (0, 1)
###Output
_____no_output_____
###Markdown
Since we are postmultiplying, the tuple is treated as a column vector Mutability and Copying Arrays NumPy arrays are mutable data types, like Python lists. In other words, their contents can be altered (mutated) in memory after initialization. We already saw examples above. Here’s another example:
###Code
a = np.array([42, 44])
a
a[-1] = 0 # Change last element to 0
a
###Output
_____no_output_____
###Markdown
Mutability leads to the following behavior (which can be shocking to MATLAB programmers…)
###Code
a = np.random.randn(3)
a
b = a
b[0] = 0.0
a
###Output
_____no_output_____
###Markdown
What’s happened is that we have changed `a` by changing `b`The name `b` is bound to `a` and becomes just another reference to thearray (the Python assignment model is described in more detail [later in the course](https://lectures.quantecon.org/py/python_advanced_features.html))Hence, it has equal rights to make changes to that arrayThis is in fact the most sensible default behavior!It means that we pass around only pointers to data, rather than making copiesMaking copies is expensive in terms of both speed and memory Making CopiesIt is of course possible to make `b` an independent copy of `a` when requiredThis can be done using `np.copy`
###Code
a = np.random.randn(3)
a
b = np.copy(a)
b
###Output
_____no_output_____
###Markdown
Now `b` is an independent copy (called a *deep copy*)
###Code
b[:] = 1
b
a
###Output
_____no_output_____
###Markdown
Note that the change to `b` has not affected `a` Additional Functionality Let’s look at some other useful things we can do with NumPy Vectorized Functions NumPy provides versions of the standard functions `log`, `exp`, `sin`, etc. that act *element-wise* on arrays
###Code
z = np.array([1, 2, 3])
np.sin(z)
###Output
_____no_output_____
###Markdown
This eliminates the need for explicit element-by-element loops such as
###Code
n = len(z)
y = np.empty(n)
for i in range(n):
y[i] = np.sin(z[i])
###Output
_____no_output_____
###Markdown
Because they act element-wise on arrays, these functions are called *vectorized functions*. In NumPy-speak, they are also called *ufuncs*, which stands for “universal functions”. As we saw above, the usual arithmetic operations (`+`, `*`, etc.) also work element-wise, and combining these with the ufuncs gives a very large set of fast element-wise functions
###Code
z
(1 / np.sqrt(2 * np.pi)) * np.exp(- 0.5 * z**2)
###Output
_____no_output_____
###Markdown
Not all user defined functions will act element-wise. For example, passing the function `f` defined below a NumPy array causes a `ValueError`
###Code
def f(x):
return 1 if x > 0 else 0
###Output
_____no_output_____
###Markdown
The NumPy function `np.where` provides a vectorized alternative:
###Code
x = np.random.randn(4)
x
np.where(x > 0, 1, 0) # Insert 1 if x > 0 true, otherwise 0
###Output
_____no_output_____
###Markdown
You can also use `np.vectorize` to vectorize a given function
###Code
def f(x): return 1 if x > 0 else 0
f = np.vectorize(f)
f(x) # Passing the same vector x as in the previous example
###Output
_____no_output_____
###Markdown
However, this approach doesn’t always obtain the same speed as a more carefully crafted vectorized function Comparisons As a rule, comparisons on arrays are done element-wise
###Code
z = np.array([2, 3])
y = np.array([2, 3])
z == y
y[0] = 5
z == y
z != y
###Output
_____no_output_____
###Markdown
The situation is similar for `>`, `>=`, `<` and `<=`. We can also do comparisons against scalars
###Code
z = np.linspace(0, 10, 5)
z
z > 3
###Output
_____no_output_____
###Markdown
This is particularly useful for *conditional extraction*
###Code
b = z > 3
b
z[b]
###Output
_____no_output_____
###Markdown
Of course we can—and frequently do—perform this in one step
###Code
z[z > 3]
###Output
_____no_output_____
###Markdown
Subpackages NumPy provides some additional functionality related to scientific programming through its subpackages. We’ve already seen how we can generate random variables using np.random
###Code
z = np.random.randn(10000) # Generate standard normals
y = np.random.binomial(10, 0.5, size=1000) # 1,000 draws from Bin(10, 0.5)
y.mean()
###Output
_____no_output_____
###Markdown
Another commonly used subpackage is np.linalg
###Code
A = np.array([[1, 2], [3, 4]])
np.linalg.det(A) # Compute the determinant
np.linalg.inv(A) # Compute the inverse
###Output
_____no_output_____
###Markdown
Much of this functionality is also available in [SciPy](http://www.scipy.org/), a collection of modules that are built on top of NumPyWe’ll cover the SciPy versions in more detail [soon](https://lectures.quantecon.org/py/scipy.html)For a comprehensive list of what’s available in NumPy see [this documentation](https://docs.scipy.org/doc/numpy/reference/routines.html) Exercises Exercise 1Consider the polynomial expression$$p(x) = a_0 + a_1 x + a_2 x^2 + \cdots a_N x^N = \sum_{n=0}^N a_n x^n \tag{1}$$[Earlier](https://lectures.quantecon.org/py/python_essentials.htmlpyess-ex2), you wrote a simple function `p(x, coeff)` to evaluate [(1)](equation-np-polynom) without considering efficiencyNow write a new function that does the same job, but uses NumPy arrays and array operations for its computations, rather than any form of Python loop(Such functionality is already implemented as `np.poly1d`, but for the sake of the exercise don’t use this class)- Hint: Use `np.cumprod()` Exercise 2Let `q` be a NumPy array of length `n` with `q.sum() == 1`Suppose that `q` represents a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)We wish to generate a discrete random variable $ x $ such that $ \mathbb P\{x = i\} = q_i $In other words, `x` takes values in `range(len(q))` and `x = i` with probability `q[i]`The standard (inverse transform) algorithm is as follows:- Divide the unit interval $ [0, 1] $ into $ n $ subintervals $ I_0, I_1, \ldots, I_{n-1} $ such that the length of $ I_i $ is $ q_i $ - Draw a uniform random variable $ U $ on $ [0, 1] $ and return the $ i $ such that $ U \in I_i $ The probability of drawing $ i $ is the length of $ I_i $, which is equal to $ q_i $We can implement the algorithm as follows
###Code
from random import uniform
def sample(q):
a = 0.0
U = uniform(0, 1)
for i in range(len(q)):
if a < U <= a + q[i]:
return i
a = a + q[i]
###Output
_____no_output_____
###Markdown
If you can’t see how this works, try thinking through the flow for a simple example, such as `q = [0.25, 0.75]`It helps to sketch the intervals on paperYour exercise is to speed it up using NumPy, avoiding explicit loops- Hint: Use `np.searchsorted` and `np.cumsum` If you can, implement the functionality as a class called `discreteRV`, where- the data for an instance of the class is the vector of probabilities `q` - the class has a `draw()` method, which returns one draw according to the algorithm described above If you can, write the method so that `draw(k)` returns `k` draws from `q` Exercise 3Recall our [earlier discussion](https://lectures.quantecon.org/py/python_oop.htmloop-ex1) of the empirical cumulative distribution functionYour task is to1. Make the `__call__` method more efficient using NumPy 1. Add a method that plots the ECDF over $ [a, b] $, where $ a $ and $ b $ are method parameters Solutions
###Code
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Exercise 1This code does the job
###Code
def p(x, coef):
X = np.empty(len(coef))
X[0] = 1
X[1:] = x
y = np.cumprod(X) # y = [1, x, x**2,...]
return coef @ y
###Output
_____no_output_____
###Markdown
Let’s test it
###Code
coef = np.ones(3)
print(coef)
print(p(1, coef))
# For comparison
q = np.poly1d(coef)
print(q(1))
###Output
[1. 1. 1.]
3.0
3.0
###Markdown
Exercise 2Here’s our first pass at a solution:
###Code
from numpy import cumsum
from numpy.random import uniform
class DiscreteRV:
"""
Generates an array of draws from a discrete random variable with vector of
probabilities given by q.
"""
def __init__(self, q):
"""
The argument q is a NumPy array, or array like, nonnegative and sums
to 1
"""
self.q = q
self.Q = cumsum(q)
def draw(self, k=1):
"""
Returns k draws from q. For each such draw, the value i is returned
with probability q[i].
"""
return self.Q.searchsorted(uniform(0, 1, size=k))
###Output
_____no_output_____
###Markdown
The logic is not obvious, but if you take your time and read it slowly, you will understand. There is a problem here, however. Suppose that `q` is altered after an instance of `DiscreteRV` is created, for example by
###Code
q = (0.1, 0.9)
d = DiscreteRV(q)
d.q = (0.5, 0.5)
###Output
_____no_output_____
###Markdown
The problem is that `Q` does not change accordingly, and `Q` is the data used in the `draw` method. To deal with this, one option is to compute `Q` every time the draw method is called. But this is inefficient relative to computing `Q` once off. A better option is to use descriptors. A solution from the [quantecon library](https://github.com/QuantEcon/QuantEcon.py/tree/master/quantecon) using descriptors that behaves as we desire can be found [here](https://github.com/QuantEcon/QuantEcon.py/blob/master/quantecon/discrete_rv.py) Exercise 3 An example solution is given below. In essence we’ve just taken [this code](https://github.com/QuantEcon/QuantEcon.py/blob/master/quantecon/ecdf.py) from QuantEcon and added in a plot method
###Code
"""
Modifies ecdf.py from QuantEcon to add in a plot method
"""
class ECDF:
"""
One-dimensional empirical distribution function given a vector of
observations.
Parameters
----------
observations : array_like
An array of observations
Attributes
----------
observations : array_like
An array of observations
"""
def __init__(self, observations):
self.observations = np.asarray(observations)
def __call__(self, x):
"""
Evaluates the ecdf at x
Parameters
----------
x : scalar(float)
The x at which the ecdf is evaluated
Returns
-------
scalar(float)
Fraction of the sample less than x
"""
return np.mean(self.observations <= x)
def plot(self, a=None, b=None):
"""
Plot the ecdf on the interval [a, b].
Parameters
----------
a : scalar(float), optional(default=None)
Lower end point of the plot interval
b : scalar(float), optional(default=None)
Upper end point of the plot interval
"""
# === choose reasonable interval if [a, b] not specified === #
if a is None:
a = self.observations.min() - self.observations.std()
if b is None:
b = self.observations.max() + self.observations.std()
# === generate plot === #
x_vals = np.linspace(a, b, num=100)
f = np.vectorize(self.__call__)
plt.plot(x_vals, f(x_vals))
plt.show()
###Output
_____no_output_____
###Markdown
Here’s an example of usage
###Code
X = np.random.randn(1000)
F = ECDF(X)
F.plot()
###Output
_____no_output_____
###Markdown
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Numpy
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
An array object represents a multidimensional, homogeneous array of fixed-size items
###Code
country = np.array(['USA', 'Japan', 'UK', 'Celestopoli', 'India', 'China'])
print(country)
###Output
['USA' 'Japan' 'UK' 'Celestopoli' 'India' 'China']
###Markdown
When you create a NumPy array using np.array, it makes a copy of the input object, so changes to the original are not reflected in the new array (with default parameters). On the other hand, if you call np.asarray() on an existing ndarray, no copy is made, so changes to the original are reflected.
###Code
x = np.array([2,3,1,0])
x
x.shape
y = np.asarray([4,5,3,1])
y
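# added sketch of the copy/no-copy behaviour described above:
base = np.arange(3)
copied = np.array(base) # np.array makes a copy
aliased = np.asarray(base) # np.asarray returns the same ndarray, no copy
base[0] = 99
print(copied, aliased) # the copy keeps 0, the alias sees 99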
type(x)
y = np.array([[2,3,1,0],[1,1,1,1]])
y.shape
np.zeros((2, 3))
k =np.ones((2, 3))
k
k.shape
np.arange(10)
np.arange(2, 10, dtype=float) # default step=1
np.linspace(1., 4.,num=10)
np.linspace(1., 4.,10)
x = np.array([3, 6, 9, 12])
x/3.0
x
print(x)
z = np.array([2,3,1,0])
np.sort(z)
a = np.array([[1, 2, 3], [4, 5, 6]])
b = np.array([[9, 8, 7], [6, 5, 4]])
c = np.concatenate((a, b))
c
c.shape
np.ones((2, 2), dtype=bool)
## Conversion to an array:
import numpy as np
# from list
my_list = [1, 2, 3, 4, 5, 6, 7, 8]
my_list = np.asarray(my_list)
#type(my_list)
my_list
# from tuple
my_tuple = ([8, 4, 6], [1, 2, 3])
type(my_tuple)
my_tuple = np.asarray(my_tuple)
my_tuple
#Array to list
a = np.array([1, 2, 3, 4, 5])
b = a.tolist()
b
#save array to drive
a = np.array([1, 2, 3, 4, 5])
np.savetxt("il_mio_array.csv", a)
# the csv file must be on the drive (Colab)
from google.colab import files
files.download("il_mio_array.csv")
#######################################################
a = np.array([1,2,3])
a
b =np.append(a,[10,11,12,13])
b
a = np.array([1,2,3])
b = np.array([1,2,3])
c = a+b
c
a = np.array([[1,2,3],[4,5,6]])
b = np.array([[400], [800]])
c = np.append(a, b)
c
a = np.array([[1,2,3],[4,5,6]])
b = np.array([[400], [800]])
c = np.append(a, b, axis = 1)
c
d = np.array([1, 2, 3])
e = np.insert(d, 1, 90) ## specify at which index to insert the value
e
d = np.array([1, 2, 3])
e = np.insert(d, 0, 60) ## specify at which index to insert the value
e
len(d)
d = np.array([1, 2, 3])
e = np.insert(d, 3, 90) ## specify at which index to insert the value
e
#APPEND A ROW
a = np.array([[1, 2, 3], [4, 5, 6]])
b = np.append(a, [[50, 60, 70]], axis = 0)
b
## what if we wanted to append by column?
a = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[50],[60]])
b = np.append(a, c, axis = 1)
b
### Delete an element
x = np.array([1, 2, 3])
x= np.delete(x, 1, axis = 0)
x
#delete an entire row (axis = 0)
x = np.array([[1, 2, 3], [4, 5, 6], [10, 20, 30]])
x = np.delete(x, 2, axis = 0)
x
x.shape
#delete an entire column (axis = 1)
x = np.array([[1, 2, 3], [4, 5, 6], [10, 20, 30]])
x = np.delete(x, 2, axis = 1)
x
#find the index value
a = np.array([1, 2, 3, 4, 5])
print("il valore 5 si trova all'index: ", np.where(a == 5))
######### automatic addition
addizione = lambda x: x + 2
a = np.array([1, 2, 3, 4, 5, 6])
b = addizione(a)
b
moltiplicazione = lambda x: x**2
a = np.array([1, 2, 3, 4, 5, 6])
b = moltiplicazione(a)
b
età=18
if età <18:
print('sei minorenne')
else:
print('sei maggiorenne')
età=18
if età <10:
print('sei un bimbo')
elif età<=18:
print('sei un regaz')
elif età<50:
print('sei un boomer')
else:
print('bella vecchio! sei anziano')
età=50
patente=False
if età >=18 and patente==True:
print('puoi guidare finalmente la tua auto da sogno')
elif età >=18 and patente==False:
print('no patente, no party')
else:
print('mi dispiace, ritenta quando sarai più grande!!')
###Output
no patente, no party
###Markdown
Funzioni
###Code
def somma_due_numeri(a,b): ## a, b are the arguments
#print('this function adds two numbers:')
risultato = a + b
return risultato
# THE OUTPUT OF A FUNCTION IS THE VALUE OF ITS RETURN
# EVERY FUNCTION HAS A RETURN
## input
x =input()
#x = input('enter something ')
type(x)
### PROGRAM
print('this is my python program')
nome=input('enter your name ')
print('good afternoon '+nome+', I hope you are learning a lot!')
età = input('how old are you? ')
print('Great! So you are '+ età + ' years old. Next year you will be '+ str(int(età)+1))
###Output
_____no_output_____
###Markdown
LINSPACE creates an array of evenly spaced values between two points.
###Code
np.linspace(4,10,3)
##EYE
np.eye(4)
np.random.randn(8) # draws samples from the standard normal distribution
np.random.randn(5,5)
np.random.randint(1,10) # can return any value from 1 up to (but not including) 10
np.random.randint(1,20,5) # returns 5 random integers from 1 to 20 (exclusive)
benimdizi = np.random.randint(10,100,30)
benimdizi
###Output
_____no_output_____
###Markdown
NumPy
###Code
from doctest import testmod as run_doctest
import numpy as np
%pip install --upgrade numpy
np.__version__
###Output
_____no_output_____
###Markdown
Precision
###Code
0.1
0.2
0.3
0.1 + 0.2 == 0.3
0.1 + 0.2
round(0.1+0.2, 16) == 0.3
round(0.1+0.2, 17) == 0.3
1.23 == (123) * 1E-2
from decimal import Decimal
%%timeit -r 100 -n 10_000
Decimal('0.1') + Decimal('0.2')
%%timeit -r 100 -n 10_000
0.1 + 0.2
###Output
10.7 ns ± 4.71 ns per loop (mean ± std. dev. of 100 runs, 10000 loops each)
###Markdown
NumPy Built-ins
###Code
np.pi
np.e
np.inf
np.Infinity
np.PINF
np.Inf
float('inf') == np.inf
np.inf + -np.inf
a = np.array( [1,2,np.inf] )
np.isinf(a)
np.nan
np.NaN
np.NAN
float('nan') == np.nan
bool(np.nan)
np.nan == None
a = np.array([1,2,np.nan])
np.isnan(a)
np.isinf(a)
###Output
_____no_output_____
###Markdown
Array Create
###Code
np.array([1.111, 2.222, 3.333, 9.9999], dtype=Decimal)
np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
np.array([[1,2,3], [4,5,6], [7,8,9]])
np.array([
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]])
list( range(3,10,2) )
# start = 0
# stop = ...
# step = 1
list( range(10) )
# lower bound inclusive
# upper bound exclusive
# exceptions:
# - pandas
# - randint
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
np.array(range(10))
np.array(range(3,10,2), dtype=float)
# numpy
# python
np.array( [round(x, 2) for x in range(0,100,3) if x%2==0] , dtype=float)
[round(x, 2) for x in range(0,100,3) if x%2==0]
[round(x, 2) for x in range(0,100,3) if x%2==0]
[round(x, 2)
for x in range(0,100,3)
if x%2==0]
result = []
for x in range(0,100,3):
if x % 2 == 0:
result.append(x)
result
data = [x for x in range(0,10)]
data
for i in data:
print(i)
if i == 3:
break
for i in data:
print(i)
if i == 6:
break
list(data)
list(data)
data = (x for x in range(0,10))
print(data)
for i in data:
print(i)
if i == 3:
break
for i in data:
print(i)
if i == 6:
break
list(data)
list(data)
a = (x for x in range(0,10000000))
b = [x for x in range(0,10000000)]
import sys
sys.getsizeof(a)
sys.getsizeof(b)
np.arange(0.0, 1.0, step=0.1)
np.linspace(2.0, 3.0, num=50)
np.zeros(shape=(2,3), dtype=int)
np.zeros(shape=(2,3))
a = np.array([[1,2],
[3,4],
[5,6]])
np.zeros_like(a)
np.ones(shape=(5,7), dtype=int)
np.ones_like(a)
np.empty(shape=(2,3))
a = np.array([[1,2],
[3,4],
[5,6]])
np.empty_like(a)
np.full((2,3), np.inf)
np.identity(4, int)
a
import math
math.sin(3.14)
a = np.array([3.14])
np.sin(a)
a
type(a)
"""
* Assignment: Numpy Create Arange
* Complexity: easy
* Lines of code: 1 lines
* Time: 3 min
English:
1. Create `a: np.ndarray` with even numbers from 0 to 100 (without 100)
2. Numbers must be `float` type
3. Run doctests - all must succeed
Polish:
1. Stwórz `a: np.ndarray` z liczbami parzystymi od 0 do 100 (bez 100)
2. Liczby muszą być typu `float`
3. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> type(result) is np.ndarray
True
>>> result
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20., 22., 24.,
26., 28., 30., 32., 34., 36., 38., 40., 42., 44., 46., 48., 50.,
52., 54., 56., 58., 60., 62., 64., 66., 68., 70., 72., 74., 76.,
78., 80., 82., 84., 86., 88., 90., 92., 94., 96., 98.])
"""
import numpy as np
result = np.arange(0, 100, 2, dtype=float)
run_doctest()
"""
>>> name = 'Mark'
>>> print(name)
Mark
"""
run_doctest()
import numpy as np
np.__version__
np.array([1,2,3], dtype=np.int32)
import sys
a = np.array([1,2,3], dtype=np.int8)
b = np.array([1,2,3], dtype=np.int32)
c = np.array([1,2,3], dtype=np.int64)
sys.getsizeof(a)
sys.getsizeof(b)
sys.getsizeof(c)
"""
* Assignment: Numpy Dtype Astype
* Complexity: easy
* Lines of code: 2 lines
* Time: 3 min
English:
1. Given `DATA: np.ndarray` (see below)
2. Convert to `int` and save result as `result_int`
3. Convert to `bool` and save result as `result_bool`
4. What happened in each of those steps?
5. Run doctests - all must succeed
Polish:
1. Dany `DATA: np.ndarray` (patrz sekcja input)
2. Przekonwertuj do typu `int` i wynik zapisz jako `result_int`
3. Przekonwertuj do typu `bool` i wynik zapisz jako `result_bool`
4. Co się stało w każdym z tych kroków?
5. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> type(result_int) is np.ndarray
True
>>> type(result_bool) is np.ndarray
True
>>> result_int
array([[-1, 0, 1],
[ 2, 3, 4]])
>>> result_bool
array([[ True, False, True],
[ True, True, True]])
"""
import numpy as np
DATA = np.array([[-1.1, 0.0, 1.1],
[2.2, 3.3, 4.4]])
result_int = DATA.astype(int)
result_bool = DATA.astype(bool)
run_doctest()
a = np.arange(100)
a
a.reshape(10, 10)
a.astype(float)
a = np.arange(100).reshape(20, 5)
a
a.shape
a = np.arange(100).reshape(19, 5)
a = np.arange(100).reshape(5, 10, 2)
a.shape
a
a.flatten()
a.ravel()
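# note (added): flatten() always returns a copy, while ravel() returns a view when possible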
a
_ = 'Mark'
print(_)
1 + 2
print(_)
_
first_name = 'Mark'
imie, nazwisko, *_ = 'Mark,Watney,44,170,75'.split(',')
imie
nazwisko
_
"""
* Assignment: Numpy Shape 1d
* Complexity: easy
* Lines of code: 2 lines
* Time: 3 min
English:
1. Define `result_ravel` with result of flattening `DATA` using `.ravel()` method
2. Define `result_flatten` with result of flattening `DATA` using `.flatten()` method
3. Define `result_reshape` with result of reshaping `DATA` into 1x9
4. Run doctests - all must succeed
Polish:
1. Zdefiniuj `result_ravel` z wynikiem spłaszczenia `DATA` używając metody `.ravel()`
2. Zdefiniuj `result_flatten` z wynikiem spłaszczenia `DATA` używając metody `.flatten()`
3. Zdefiniuj `result_reshape` z wynikiem zmiany kształtu `DATA` na 1x9
4. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> type(result_ravel) is np.ndarray
True
>>> type(result_flatten) is np.ndarray
True
>>> type(result_reshape) is np.ndarray
True
>>> result_flatten
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> result_ravel
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> result_reshape
array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
"""
import numpy as np
DATA = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
result_ravel = DATA.ravel()
result_flatten = DATA.flatten()
result_reshape = DATA.reshape(1, 9)
run_doctest()
result_ravel
result_flatten
result_reshape
"""
* Assignment: Numpy Attributes
* Complexity: easy
* Lines of code: 7 lines
* Time: 5 min
English:
1. Define `result: dict` with:
a. number of dimensions;
b. number of elements;
c. data type;
d. element size;
e. shape;
f. strides.
2. Run doctests - all must succeed
Polish:
1. Zdefiniuj `result: dict` z:
a. liczbę wymiarów,
b. liczbę elementów,
c. typ danych,
d. rozmiar elementu,
e. kształt,
f. przeskoki (strides).
2. Uruchom doctesty - wszystkie muszą się powieść
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> type(result) is dict
True
>>> result # doctest: +NORMALIZE_WHITESPACE
{'number of dimensions': 2,
'number of elements': 6,
'data type': dtype('float64'),
'element size': 8,
'shape': (2, 3),
'strides': (24, 8)}
"""
import numpy as np
DATA = np.array([[-1.1, 0.0, 1.1],
[2.2, 3.3, 4.4]])
result = {
'number of dimensions': DATA.ndim,
'number of elements': DATA.size,
'data type': DATA.dtype,
'element size': DATA.itemsize,
'shape': DATA.shape,
'strides': DATA.strides,
}
run_doctest()
###Output
_____no_output_____
###Markdown
NumPy Indexes* `int` - scalar index* `list[int]` - vector (fancy) index* `list[bool]` - boolean (mask) index* `tuple` - multi-index
###Code
a = np.arange(123,134)
a
a[5]
a[ [0,5,7,0] ]
a[ [True, False, True, True, False, True, True, False, True, False, True] ]
a = np.array([[1, 2, 3],
[4, 5, 6]])
a[0][2]
a[0,2]
a.flat[5]
a[-1]
a = np.arange(0,100).reshape(10, 10)
a
a[2,4]
a[-1]
a[-5]
a[2,-5]
a = np.array([[[ 1, 2, 3],
[ 4, 5, 6],
[ 5, 6, 7]],
[[11, 22, 33],
[44, 55, 66],
[77, 88, 99]]])
a[1][2][0]
a[1,2,0]
a
a[1,2,0] = 0
a
a = np.array([1, 2, 3], float)
a[2] = 99
a
a = np.array([1, 2, 3], int)
a[0] = 99.99999
a
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
a[0]
a[0], a[1]
a[0,2], a[2,0]
a[0,2], a[2,0], a[0]
a[ 0 ]
a[ [0,1] ]
a[ [True,False, True], [1,2] ]
###Output
_____no_output_____
###Markdown
* `int` - scalar index
* `list[int]` - vector index
* `list[bool]` - boolean index
* `tuple` - multi-index
###Code
"""
* Assignment: Numpy Indexing
* Complexity: easy
* Lines of code: 5 lines
* Time: 5 min
English:
1. Create `result: np.ndarray`
2. Add to `result` elements from `DATA` at indexes:
a. row 0, column 2
b. row 2, column 2
c. row 0, column 0
d. row 1, column 0
3. `result` size must be 2x2
4. `result` type must be float
5. Run doctests - all must succeed
Polish:
1. Stwórz `result: np.ndarray`
2. Dodaj do `result` elementy z `DATA` o indeksach:
a. wiersz 0, kolumna 2
b. wiersz 2, kolumna 2
c. wiersz 0, kolumna 0
d. wiersz 1, kolumna 0
3. Rozmiar `result` musi być 2x2
4. Typ `result` musi być float
5. Uruchom doctesty - wszystkie muszą się powieść
Hints:
* `np.zeros(shape, dtype)`
Tests:
>>> import sys; sys.tracebacklimit = 0
>>> type(result) is np.ndarray
True
>>> result
array([[3., 9.],
[1., 4.]])
"""
import numpy as np
DATA = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
result = np.array([
[DATA[0,2], DATA[2,2]],
[DATA[0,0], DATA[1,0]],
], dtype=float)
run_doctest()
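# Alternative sketch following the `np.zeros(shape, dtype)` hint
# (the name `result_alt` is used here only for illustration):
result_alt = np.zeros((2, 2), dtype=float)
result_alt[0, 0] = DATA[0, 2]
result_alt[0, 1] = DATA[2, 2]
result_alt[1, 0] = DATA[0, 0]
result_alt[1, 1] = DATA[1, 0]
print(result_alt)  # [[3. 9.]
                   #  [1. 4.]]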
###Output
_____no_output_____
###Markdown
NumPy Introductory Tutorial (Jupyter Notebook edition). Author: Sttot ([email protected]) [GitHub](https://github.com/Gk0Wk). This tutorial combines several open-source tutorials with the author's own study notes; all references are free and open source, so please do not use this tutorial for commercial purposes. Sharing and additions are welcome; when re-posting, please credit the source and the author.
---
References:
[1] https://jalammar.github.io/visual-numpy/
[2] https://www.runoob.com/numpy/numpy-tutorial.html
[3] https://blog.csdn.net/vicdd/article/details/52667709

Introduction — What is NumPy: NumPy is a Python library for computing with tensors (scalars, vectors, matrices and higher-dimensional tensors) and provides a large number of mathematical routines.[1] NumPy's predecessor, Numeric, was originally developed by Jim Hugunin and other collaborators; in 2005 Travis Oliphant merged the features of Numarray, a library of the same nature, into Numeric, added further extensions, and created NumPy. NumPy is open source and maintained by many contributors.[2]

What is NumPy used for: because NumPy makes it easy to represent tensors and compute with them, it is widely used for data analysis, machine learning and scientific computing in the Python ecosystem. Many popular packages are built on top of it, for example scikit-learn, SciPy, pandas and tensorflow. NumPy is usually used together with SciPy (Scientific Python) and Matplotlib (a plotting library); this combination is widely used as a replacement for MatLab and forms a powerful scientific-computing environment that helps us learn data science and machine learning with Python.

The main contents of NumPy:
* a powerful N-dimensional array object, `ndarray`, describing tensors of arbitrary dimension (the counterpart of `tensor` in Tensorflow and PyTorch);
* functions that produce `ndarray` objects, in particular many random generation/sampling initializers;
* broadcasting functions that operate on a whole `ndarray` at once instead of element by element; NumPy ships a rich set of such mathematical functions (linear algebra, Fourier transforms, statistics, and so on);
* tools for integrating C/C++/Fortran code.

How to install NumPy: see the [菜鸟教程 installation guide](https://www.runoob.com/numpy/numpy-install.html). The examples below need NumPy, so make sure it is installed for the best reading experience. To use NumPy, first import it into your Python script:
```python
import numpy
```
A common convention is to alias the imported package as `np`:
```python
import numpy as np
```
You will also see the `np.xxx` form below; whether you use `numpy.xxx` or `np.xxx` in practice is entirely a matter of habit.
---
Note: for convenience, in the text below `ndarray` (the NumPy array object) and the mathematical notion of a *tensor* are treated as equivalent, while *array* refers to Python's native `array` data type. Scalars, vectors and matrices are all special kinds of tensors, i.e. special kinds of `ndarray`. This article does not go deeply into linear algebra or Python; readers who feel lost should first review that material.

NumPy's core concept: the `ndarray` object ~ tensor. NumPy is a library centered on tensor computation, so the `ndarray` object used to represent tensors is its core and foundation. The name `ndarray` is short for *n-dimension array*, which sounds very much like the multidimensional-array concept of Python and other high-level languages. In Python, how would we use an array to represent the vector $\left(1,1,1\right)$ in XYZ coordinates? Like this:
```python
vector = [1, 1, 1]
```
And a 2x2 identity matrix $\begin{bmatrix}1 & 0 \\ 0 & 1\end{bmatrix}$? We can use a "two-dimensional array", i.e. an array of arrays:
```python
eye_matrix = [[1, 0], [0, 1]]
```
For higher dimensions we keep nesting. An array of basic data values is a 1-dimensional array, and an array made of several (equal-length) (n-1)-dimensional arrays is an n-dimensional array — that is the inductive definition. An n-dimensional array contains (n-1)-dimensional arrays, which in turn contain (n-2)-dimensional arrays, and so on; the lowest-level data such as `0` and `1` are the array's *elements*. The same holds in NumPy: `ndarray` is the special multidimensional array NumPy provides to represent scalars (0-dimensional tensors), vectors (1-dimensional), matrices (2-dimensional) and higher-dimensional tensors.

The concepts behind `ndarray`: we already met one important concept, the dimension (also called the *rank*), which says how many levels the `ndarray` (tensor) is nested. Besides the dimension, the tensor's *shape* matters as well. What is the shape? The number of elements the tensor holds along each dimension. Using a matrix as an example again:
$$\left.\begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix}\right\}\text{2 line vectors}$$
It has 2 rows and 3 columns; if the matrix is seen as a 2-dimensional tensor made of "row vectors", its shape is 2x3. The rows make up the columns, so the column level is the higher dimension and the row level the lower one; higher dimensions are written first and lower ones last, hence 2x3. Besides the shape, `ndarray` has another important property: the type of its elements. Unlike Python arrays, an `ndarray` requires all its (tensor) elements to have the same type, chosen from a predefined set or defined by the user (more on this later); this mainly makes it easier for NumPy to manage memory and implement its algorithms. To summarize, an `ndarray` is defined by three concepts:
* dimension (a.k.a. rank): how many levels the tensor nests;
* shape: the length of each level;
* the element data type.

Creating an `ndarray` object — from given input data:
```python
numpy.array(object, dtype = None, copy = True, order = None, subok = False, ndmin = 0)
```
* `object` is the input data, usually integers, reals, booleans or multidimensional arrays of them, or another `ndarray`, a `matrix` (another NumPy type dedicated to matrices; the comparison comes later), and so on. Why "usually"? If you are curious, try `numpy.array([1, [1]])` and `numpy.array('1')` and see what happens — not discussed here.
* `dtype` is the element data type of the `ndarray` to create; if `None`, it is inferred from `object`.
* `copy` — whether the object needs to be copied.
* `order` is the representation/storage order of the `ndarray`: `'C'` row-major, `'F'` column-major, `'A'` any order (the default).
* `subok` — whether `numpy.array` forces the result to an `array` or lets it inherit the type of `object`; usually you can ignore this option, but if the input is a `matrix` and you do not want `numpy.array()` to return an `ndarray`, set it to `True`. [Details here](https://stackoverflow.com/questions/50548341/what-is-the-purpose-and-utility-of-the-subok-option-in-numpy-zeros-like).
* `ndmin` specifies the minimum number of dimensions of the resulting array; if the input has fewer, extra levels are wrapped around it.
With this we can easily create an `ndarray` object, for example: How to specify the element data type `dtype`: simply pass `dtype` to `numpy.array()`. Commonly used dtypes include `numpy.int64`, `numpy.float64`, `numpy.complex128`, etc.; these are NumPy's built-in `dtype` objects — see the table at [菜鸟教程-NumPy数据类型](https://www.runoob.com/numpy/numpy-dtype.html). A `dtype` can also be user-defined via `numpy.dtype(object, align, copy)`; the details are likewise on that page, but this is rarely needed.

Creating special `ndarray` objects — a 1-D `ndarray` running from 0 to n-1 (similar to `range`):
```python
numpy.arange(5)
```
Creating a tensor filled with zeros:
```python
numpy.zeros(shape, dtype = float, order = 'C')
```
`shape` is the shape of the tensor, given as a `tuple`, for example:
###Code
import numpy
numpy.zeros((1,2,3))
###Output
_____no_output_____
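As a quick illustration of the `numpy.array` parameters described above (`dtype` and `ndmin`; the variable names here are only for this sketch):

```python
import numpy

a = numpy.array([1, 2, 3], dtype=numpy.float64)  # force float64 elements
b = numpy.array([1, 2, 3], ndmin=2)              # wrap into a 2-D array
print(a.dtype, b.shape)                          # float64 (1, 3)
```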
###Markdown
Higher dimensions come first, lower dimensions last. `order` is the representation/storage order of the `ndarray`: `'C'` for row-major, `'F'` for column-major. Creating a tensor filled with ones:
```python
numpy.ones(shape, dtype = float, order = 'C')
```
Creating an uninitialized tensor: an `ndarray` object is allocated and mapped directly onto a block of memory (more on this later); the methods above first map the memory and then initialize that region. That initialization costs time, especially for large tensors. If we do not need it — for example because later assignments will fill the tensor anyway — we can create an empty tensor this way and save the initialization cost:
```python
numpy.empty(shape, dtype = float, order = 'C')
```
Note: the initial contents of a tensor created this way are unknown.
###Code
import numpy
numpy.empty((2,3))
###Output
_____no_output_____
###Markdown
Creating random `ndarray` objects: in practice there are many situations where random tensors are needed, for example adding noise to input data or initializing the parameter matrices of a neural network. NumPy offers many functions for generating random tensors and for random sampling. The `numpy.random` module contains most of them; in general they take n size arguments and produce an n-dimensional random tensor — for example `numpy.random.rand(2,3)` produces a 2x3 random matrix (a 2-dimensional tensor).
* `rand(dn, dn-1, ...)` samples uniformly from [0,1); the arguments are the sizes of each dimension.
* `randn(dn, dn-1, ...)` samples from the standard normal distribution. For a general normal distribution $N\left(\mu,\sigma^2\right)$, simply use `sigma*randn(...)+mu`.
* `seed()` and `RandomState()` both set the random seed and behave the same way.
  * `get_state()` and `set_state(state)` get and set the state of the internal random generator (the argument is a tuple).
---
* `randint(low, high=None, size=None, dtype)` generates an integer random tensor with the shape given by `size` (an int or a tuple; `None` means a 0-dimensional tensor, i.e. a scalar) over `[0, low)` when `high` is `None`, otherwise over `[low, high)`. `dtype` defaults to int64.
  * If you want a closed interval, use `random.random_integers`.
* `uniform(low, high=None, size=None)` works the same way but on the half-open interval [low, high) and produces floats.
---
* `random_sample(size=None)` returns a random float tensor of the given shape with values in `[0,1)`.
  * `random`, `ranf` and `sample` behave the same as `random_sample`; they are essentially aliases.
* `bytes(length)` returns a random byte string of length `length`.
---
* `choice(a, size=None, replace=True, p=None)` builds a tensor of shape `size` by picking from `a` (a list or an int — a single int means picking from 0 to a-1; the list may contain arbitrary objects, not only ints); `replace` controls whether items may be picked repeatedly, and `p` gives the probability associated with each element of `a`.
###Code
import numpy
print("rand:")
print(numpy.random.rand(2, 3))
print("randn:")
print(numpy.random.randn(2, 3))
print("randint:"),
print(numpy.random.randint(2, 4, size=(10)))
print("uniform:"),
print(numpy.random.uniform(0.0, 3.14, size=(4)))
print("random_sample:")
print(numpy.random.random_sample())
print("choice:")
print(numpy.random.choice(['香蕉', '苹果', '鸭梨'], size=(3,3), p=[0.3, 0.5, 0.2]))
print('bytes:')
print(numpy.random.bytes(10))
###Output
rand:
[[0.85920854 0.42035174 0.58805887]
[0.07278132 0.87678886 0.91388382]]
randn:
[[-0.14559082 -1.02213995 -1.29889556]
[-0.77083507 -0.11929756 0.97962762]]
randint:
[2 2 3 2 2 2 3 3 2 2]
uniform:
[0.71706289 1.09886982 0.75317378 1.42878006]
random_sample:
0.7879549550859939
choice:
[['鸭梨' '香蕉' '苹果']
['香蕉' '鸭梨' '香蕉']
['苹果' '苹果' '苹果']]
bytes:
b'mMd\xf86Lwz;v'
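The seeding helpers listed above (`seed`, `get_state`, `set_state`) are not demonstrated in this notebook; a minimal sketch of how they can be used for reproducibility:

```python
import numpy

numpy.random.seed(42)
first = numpy.random.rand(3)
state = numpy.random.get_state()   # save the generator state
second = numpy.random.rand(3)
numpy.random.set_state(state)      # restore the saved state
print((second == numpy.random.rand(3)).all())  # True: the same draws repeat
```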
###Markdown
* `shuffle(x)` — x is an `ndarray`; the function shuffles x in place, changing the order of its entries.
* `permutation(x)` — if x is an integer, it returns a randomly permuted 1-D `ndarray` of 0..x-1; if x is an `ndarray`, it returns a new permutation (the input and its shape are unchanged).
###Code
import numpy
arr = numpy.arange(10)
numpy.random.shuffle(arr)
print(arr)
numpy.random.permutation(numpy.arange(9).reshape((3, 3)))
###Output
[4 8 3 9 1 5 6 7 0 2]
###Markdown
Numpy for:
* linear algebra
* image processing
* signal processing
* ...
###Code
import numpy as np
my_list = np.array(range(10))
print(type(my_list))
my_list
np.arange(10)
np.linspace(0, 10, 20)
###Output
_____no_output_____
###Markdown
Indexing & slicing
###Code
my_list[0]
my_list[0:2]
###Output
_____no_output_____
###Markdown
Operations
###Code
my_list * 2
my_list[ my_list % 2 == 0 ] * 2
my_list[ (my_list < 3) | (my_list > 6) ]
###Output
_____no_output_____
###Markdown
Universal functions (ufunc)
###Code
my_list_one = np.arange(0, 10)
my_list_two = np.arange(10, 20)
print(my_list_one)
print(my_list_two)
my_list_one + my_list_two
###Output
[0 1 2 3 4 5 6 7 8 9]
[10 11 12 13 14 15 16 17 18 19]
###Markdown
Shape
###Code
my_list.shape
###Output
_____no_output_____
###Markdown
Fancy Indexing
###Code
my_list.reshape(2, 5)
my_list.reshape(5, 2)
### Stats
my_list.mean()
np.mean(my_list)
my_list.sum()
###Output
_____no_output_____
###Markdown
Work with nan (not a number)
###Code
np.mean([0, 1, 2, 3, np.nan])
np.nanmean([1, 2, 3, np.nan])
np.nanmax([1, 2, 3, np.nan])
np.nanmin([1, 2, 3, np.nan])
###Output
_____no_output_____
###Markdown
Randomization
###Code
[np.random.rand() for x in range(10)]
np.mean([np.random.rand() for x in range(100000)])
[np.random.rand() for x in range(10)]
np.random.seed(2017)
[np.random.rand() for x in range(10)]
np.random.rand(5, 2)
###Output
_____no_output_____
###Markdown
Math things
###Code
np.math.log(10)
np.log(10)
np.math.log2(10)
np.exp( np.log(10) )
###Output
_____no_output_____
###Markdown
**Tools - NumPy***NumPy is the fundamental library for scientific computing with Python. NumPy is centered around a powerful N-dimensional array object, and it also contains useful linear algebra, Fourier transform, and random number functions.* Creating arraysFirst let's make sure that this notebook works both in python 2 and 3:
###Code
from __future__ import division, print_function, unicode_literals
###Output
_____no_output_____
###Markdown
Now let's import `numpy`. Most people import it as `np`:
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
`np.zeros` The `zeros` function creates an array containing any number of zeros:
###Code
np.zeros(5)
###Output
_____no_output_____
###Markdown
It's just as easy to create a 2D array (ie. a matrix) by providing a tuple with the desired number of rows and columns. For example, here's a 3x4 matrix:
###Code
np.zeros((3,4))
###Output
_____no_output_____
###Markdown
Some vocabulary* In NumPy, each dimension is called an **axis**.* The number of axes is called the **rank**. * For example, the above 3x4 matrix is an array of rank 2 (it is 2-dimensional). * The first axis has length 3, the second has length 4.* An array's list of axis lengths is called the **shape** of the array. * For example, the above matrix's shape is `(3, 4)`. * The rank is equal to the shape's length.* The **size** of an array is the total number of elements, which is the product of all axis lengths (eg. 3*4=12)
###Code
a = np.zeros((3,4))
a
a.shape
a.ndim # equal to len(a.shape)
a.size
###Output
_____no_output_____
###Markdown
N-dimensional arraysYou can also create an N-dimensional array of arbitrary rank. For example, here's a 3D array (rank=3), with shape `(2,3,4)`:
###Code
np.zeros((2,3,4))
###Output
_____no_output_____
###Markdown
Array typeNumPy arrays have the type `ndarray`:
###Code
type(np.zeros((3,4)))
###Output
_____no_output_____
###Markdown
`np.ones`Many other NumPy functions create `ndarrays`.Here's a 3x4 matrix full of ones:
###Code
np.ones((3,4))
###Output
_____no_output_____
###Markdown
`np.full`Creates an array of the given shape initialized with the given value. Here's a 3x4 matrix full of `π`.
###Code
np.full((3,4), np.pi)
###Output
_____no_output_____
###Markdown
`np.empty`An uninitialized 2x3 array (its content is not predictable, as it is whatever is in memory at that point):
###Code
np.empty((2,3))
###Output
_____no_output_____
###Markdown
np.arrayOf course you can initialize an `ndarray` using a regular python array. Just call the `array` function:
###Code
np.array([[1,2,3,4], [10, 20, 30, 40]])
###Output
_____no_output_____
###Markdown
`np.arange`You can create an `ndarray` using NumPy's `range` function, which is similar to python's built-in `range` function:
###Code
np.arange(1, 5)
###Output
_____no_output_____
###Markdown
It also works with floats:
###Code
np.arange(1.0, 5.0)
###Output
_____no_output_____
###Markdown
Of course you can provide a step parameter:
###Code
np.arange(1, 5, 0.5)
###Output
_____no_output_____
###Markdown
However, when dealing with floats, the exact number of elements in the array is not always predictable. For example, consider this:
###Code
print(np.arange(0, 5/3, 1/3)) # depending on floating point errors, the max value is 4/3 or 5/3.
print(np.arange(0, 5/3, 0.333333333))
print(np.arange(0, 5/3, 0.333333334))
###Output
[ 0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
[ 0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
[ 0. 0.33333333 0.66666667 1. 1.33333334]
###Markdown
`np.linspace`For this reason, it is generally preferable to use the `linspace` function instead of `arange` when working with floats. The `linspace` function returns an array containing a specific number of points evenly distributed between two values (note that the maximum value is *included*, contrary to `arange`):
###Code
print(np.linspace(0, 5/3, 6))
###Output
[ 0. 0.33333333 0.66666667 1. 1.33333333 1.66666667]
###Markdown
`np.rand` and `np.randn`A number of functions are available in NumPy's `random` module to create `ndarray`s initialized with random values.For example, here is a 3x4 matrix initialized with random floats between 0 and 1 (uniform distribution):
###Code
np.random.rand(3,4)
###Output
_____no_output_____
###Markdown
Here's a 3x4 matrix containing random floats sampled from a univariate [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution) (Gaussian distribution) of mean 0 and variance 1:
###Code
np.random.randn(3,4)
###Output
_____no_output_____
###Markdown
To give you a feel of what these distributions look like, let's use matplotlib (see the [matplotlib tutorial](tools_matplotlib.ipynb) for more details):
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.hist(np.random.rand(100000), normed=True, bins=100, histtype="step", color="blue", label="rand")
plt.hist(np.random.randn(100000), normed=True, bins=100, histtype="step", color="red", label="randn")
plt.axis([-2.5, 2.5, 0, 1.1])
plt.legend(loc = "upper left")
plt.title("Random distributions")
plt.xlabel("Value")
plt.ylabel("Density")
plt.show()
###Output
_____no_output_____
###Markdown
np.fromfunctionYou can also initialize an `ndarray` using a function:
###Code
def my_function(z, y, x):
return x * y + z
np.fromfunction(my_function, (3, 2, 10))
###Output
_____no_output_____
###Markdown
NumPy first creates three `ndarrays` (one per dimension), each of shape `(3, 2, 10)`. Each array has values equal to the coordinate along a specific axis. For example, all elements in the `z` array are equal to their z-coordinate: [[[ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] [[ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.] [ 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.]] [[ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.] [ 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.]]]So the terms x, y and z in the expression `x * y + z` above are in fact `ndarray`s (we will discuss arithmetic operations on arrays below). The point is that the function `my_function` is only called *once*, instead of once per element. This makes initialization very efficient. Array data `dtype`NumPy's `ndarray`s are also efficient in part because all their elements must have the same type (usually numbers).You can check what the data type is by looking at the `dtype` attribute:
###Code
c = np.arange(1, 5)
print(c.dtype, c)
c = np.arange(1.0, 5.0)
print(c.dtype, c)
###Output
float64 [ 1. 2. 3. 4.]
###Markdown
Instead of letting NumPy guess what data type to use, you can set it explicitly when creating an array by setting the `dtype` parameter:
###Code
d = np.arange(1, 5, dtype=np.complex64)
print(d.dtype, d)
###Output
complex64 [ 1.+0.j 2.+0.j 3.+0.j 4.+0.j]
###Markdown
Available data types include `int8`, `int16`, `int32`, `int64`, `uint8`|`16`|`32`|`64`, `float16`|`32`|`64` and `complex64`|`128`. Check out [the documentation](http://docs.scipy.org/doc/numpy-1.10.1/user/basics.types.html) for the full list. `itemsize`The `itemsize` attribute returns the size (in bytes) of each item:
###Code
e = np.arange(1, 5, dtype=np.complex64)
e.itemsize
###Output
_____no_output_____
###Markdown
`data` bufferAn array's data is actually stored in memory as a flat (one dimensional) byte buffer. It is available *via* the `data` attribute (you will rarely need it, though).
###Code
f = np.array([[1,2],[1000, 2000]], dtype=np.int32)
f.data
###Output
_____no_output_____
###Markdown
In python 2, `f.data` is a buffer. In python 3, it is a memoryview.
###Code
if (hasattr(f.data, "tobytes")):
data_bytes = f.data.tobytes() # python 3
else:
data_bytes = memoryview(f.data).tobytes() # python 2
data_bytes
###Output
_____no_output_____
###Markdown
Several `ndarrays` can share the same data buffer, meaning that modifying one will also modify the others. We will see an example in a minute. Reshaping an array In placeChanging the shape of an `ndarray` is as simple as setting its `shape` attribute. However, the array's size must remain the same.
###Code
g = np.arange(24)
print(g)
print("Rank:", g.ndim)
g.shape = (6, 4)
print(g)
print("Rank:", g.ndim)
g.shape = (2, 3, 4)
print(g)
print("Rank:", g.ndim)
###Output
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
Rank: 3
###Markdown
`reshape`The `reshape` function returns a new `ndarray` object pointing at the *same* data. This means that modifying one array will also modify the other.
###Code
g2 = g.reshape(4,6)
print(g2)
print("Rank:", g2.ndim)
###Output
[[ 0 1 2 3 4 5]
[ 6 7 8 9 10 11]
[12 13 14 15 16 17]
[18 19 20 21 22 23]]
Rank: 2
###Markdown
Set item at row 1, col 2 to 999 (more about indexing below).
###Code
g2[1, 2] = 999
g2
###Output
_____no_output_____
###Markdown
The corresponding element in `g` has been modified.
###Code
g
###Output
_____no_output_____
###Markdown
`ravel`Finally, the `ravel` function returns a new one-dimensional `ndarray` that also points to the same data:
###Code
g.ravel()
###Output
_____no_output_____
###Markdown
Arithmetic operationsAll the usual arithmetic operators (`+`, `-`, `*`, `/`, `//`, `**`, etc.) can be used with `ndarray`s. They apply *elementwise*:
###Code
a = np.array([14, 23, 32, 41])
b = np.array([5, 4, 3, 2])
print("a + b =", a + b)
print("a - b =", a - b)
print("a * b =", a * b)
print("a / b =", a / b)
print("a // b =", a // b)
print("a % b =", a % b)
print("a ** b =", a ** b)
###Output
a + b = [19 27 35 43]
a - b = [ 9 19 29 39]
a * b = [70 92 96 82]
a / b = [ 2.8 5.75 10.66666667 20.5 ]
a // b = [ 2 5 10 20]
a % b = [4 3 2 1]
a ** b = [537824 279841 32768 1681]
###Markdown
Note that the multiplication is *not* a matrix multiplication. We will discuss matrix operations below.The arrays must have the same shape. If they do not, NumPy will apply the *broadcasting rules*. Broadcasting In general, when NumPy expects arrays of the same shape but finds that this is not the case, it applies the so-called *broadcasting* rules: First rule*If the arrays do not have the same rank, then a 1 will be prepended to the smaller ranking arrays until their ranks match.*
###Code
h = np.arange(5).reshape(1, 1, 5)
h
###Output
_____no_output_____
###Markdown
Now let's try to add a 1D array of shape `(5,)` to this 3D array of shape `(1,1,5)`. Applying the first rule of broadcasting!
###Code
h + [10, 20, 30, 40, 50] # same as: h + [[[10, 20, 30, 40, 50]]]
###Output
_____no_output_____
###Markdown
Second rule*Arrays with a 1 along a particular dimension act as if they had the size of the array with the largest shape along that dimension. The value of the array element is repeated along that dimension.*
###Code
k = np.arange(6).reshape(2, 3)
k
###Output
_____no_output_____
###Markdown
Let's try to add a 2D array of shape `(2,1)` to this 2D `ndarray` of shape `(2, 3)`. NumPy will apply the second rule of broadcasting:
###Code
k + [[100], [200]] # same as: k + [[100, 100, 100], [200, 200, 200]]
###Output
_____no_output_____
###Markdown
Combining rules 1 & 2, we can do this:
###Code
k + [100, 200, 300] # after rule 1: [[100, 200, 300]], and after rule 2: [[100, 200, 300], [100, 200, 300]]
###Output
_____no_output_____
###Markdown
And also, very simply:
###Code
k + 1000 # same as: k + [[1000, 1000, 1000], [1000, 1000, 1000]]
###Output
_____no_output_____
###Markdown
Third rule*After rules 1 & 2, the sizes of all arrays must match.*
###Code
try:
k + [33, 44]
except ValueError as e:
print(e)
###Output
operands could not be broadcast together with shapes (2,3) (2,)
###Markdown
Broadcasting rules are used in many NumPy operations, not just arithmetic operations, as we will see below.For more details about broadcasting, check out [the documentation](https://docs.scipy.org/doc/numpy-dev/user/basics.broadcasting.html). UpcastingWhen trying to combine arrays with different `dtype`s, NumPy will *upcast* to a type capable of handling all possible values (regardless of what the *actual* values are).
###Code
k1 = np.arange(0, 5, dtype=np.uint8)
print(k1.dtype, k1)
k2 = k1 + np.array([5, 6, 7, 8, 9], dtype=np.int8)
print(k2.dtype, k2)
###Output
int16 [ 5 7 9 11 13]
###Markdown
Note that `int16` is required to represent all *possible* `int8` and `uint8` values (from -128 to 255), even though in this case a uint8 would have sufficed.
###Code
k3 = k1 + 1.5
print(k3.dtype, k3)
###Output
float64 [ 1.5 2.5 3.5 4.5 5.5]
###Markdown
Conditional operators The conditional operators also apply elementwise:
###Code
m = np.array([20, -5, 30, 40])
m < [15, 16, 35, 36]
###Output
_____no_output_____
###Markdown
And using broadcasting:
###Code
m < 25 # equivalent to m < [25, 25, 25, 25]
###Output
_____no_output_____
###Markdown
This is most useful in conjunction with boolean indexing (discussed below).
###Code
m[m < 25]
###Output
_____no_output_____
###Markdown
Mathematical and statistical functions Many mathematical and statistical functions are available for `ndarray`s. `ndarray` methodsSome functions are simply `ndarray` methods, for example:
###Code
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
print(a)
print("mean =", a.mean())
###Output
[[ -2.5 3.1 7. ]
[ 10. 11. 12. ]]
mean = 6.76666666667
###Markdown
Note that this computes the mean of all elements in the `ndarray`, regardless of its shape.Here are a few more useful `ndarray` methods:
###Code
for func in (a.min, a.max, a.sum, a.prod, a.std, a.var):
print(func.__name__, "=", func())
###Output
min = -2.5
max = 12.0
sum = 40.6
prod = -71610.0
std = 5.08483584352
var = 25.8555555556
###Markdown
These functions accept an optional argument `axis` which lets you ask for the operation to be performed on elements along the given axis. For example:
###Code
c=np.arange(24).reshape(2,3,4)
c
c.sum(axis=0) # sum across matrices
c.sum(axis=1) # sum across rows
###Output
_____no_output_____
###Markdown
You can also sum over multiple axes:
###Code
c.sum(axis=(0,2)) # sum across matrices and columns
0+1+2+3 + 12+13+14+15, 4+5+6+7 + 16+17+18+19, 8+9+10+11 + 20+21+22+23
###Output
_____no_output_____
###Markdown
Universal functionsNumPy also provides fast elementwise functions called *universal functions*, or **ufunc**. They are vectorized wrappers of simple functions. For example `square` returns a new `ndarray` which is a copy of the original `ndarray` except that each element is squared:
###Code
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
np.square(a)
###Output
_____no_output_____
###Markdown
Here are a few more useful unary ufuncs:
###Code
print("Original ndarray")
print(a)
for func in (np.abs, np.sqrt, np.exp, np.log, np.sign, np.ceil, np.modf, np.isnan, np.cos):
print("\n", func.__name__)
print(func(a))
###Output
Original ndarray
[[ -2.5 3.1 7. ]
[ 10. 11. 12. ]]
absolute
[[ 2.5 3.1 7. ]
[ 10. 11. 12. ]]
sqrt
[[ nan 1.76068169 2.64575131]
[ 3.16227766 3.31662479 3.46410162]]
exp
[[ 8.20849986e-02 2.21979513e+01 1.09663316e+03]
[ 2.20264658e+04 5.98741417e+04 1.62754791e+05]]
log
[[ nan 1.13140211 1.94591015]
[ 2.30258509 2.39789527 2.48490665]]
sign
[[-1. 1. 1.]
[ 1. 1. 1.]]
ceil
[[ -2. 4. 7.]
[ 10. 11. 12.]]
modf
(array([[-0.5, 0.1, 0. ],
[ 0. , 0. , 0. ]]), array([[ -2., 3., 7.],
[ 10., 11., 12.]]))
isnan
[[False False False]
[False False False]]
cos
[[-0.80114362 -0.99913515 0.75390225]
[-0.83907153 0.0044257 0.84385396]]
###Markdown
Binary ufuncsThere are also many binary ufuncs, that apply elementwise on two `ndarray`s. Broadcasting rules are applied if the arrays do not have the same shape:
###Code
a = np.array([1, -2, 3, 4])
b = np.array([2, 8, -1, 7])
np.add(a, b) # equivalent to a + b
np.greater(a, b) # equivalent to a > b
np.maximum(a, b)
np.copysign(a, b)
###Output
_____no_output_____
###Markdown
Array indexing One-dimensional arraysOne-dimensional NumPy arrays can be accessed more or less like regular python arrays:
###Code
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
a[2:5]
a[2:-1]
a[:2]
a[2::2]
a[::-1]
###Output
_____no_output_____
###Markdown
Of course, you can modify elements:
###Code
a[3]=999
a
###Output
_____no_output_____
###Markdown
You can also modify an `ndarray` slice:
###Code
a[2:5] = [997, 998, 999]
a
###Output
_____no_output_____
###Markdown
Differences with regular python arraysContrary to regular python arrays, if you assign a single value to an `ndarray` slice, it is copied across the whole slice, thanks to broadcasting rules discussed above.
###Code
a[2:5] = -1
a
###Output
_____no_output_____
###Markdown
Also, you cannot grow or shrink `ndarray`s this way:
###Code
try:
a[2:5] = [1,2,3,4,5,6] # too long
except ValueError as e:
print(e)
###Output
cannot copy sequence with size 6 to array axis with dimension 3
###Markdown
You cannot delete elements either:
###Code
try:
del a[2:5]
except ValueError as e:
print(e)
###Output
cannot delete array elements
###Markdown
Last but not least, `ndarray` **slices are actually *views*** on the same data buffer. This means that if you create a slice and modify it, you are actually going to modify the original `ndarray` as well!
###Code
a_slice = a[2:6]
a_slice[1] = 1000
a # the original array was modified!
a[3] = 2000
a_slice # similarly, modifying the original array modifies the slice!
###Output
_____no_output_____
###Markdown
If you want a copy of the data, you need to use the `copy` method:
###Code
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # the original array is untouched
a[3] = 4000
another_slice  # similarly, modifying the original array does not affect the slice copy
###Output
_____no_output_____
###Markdown
Multi-dimensional arraysMulti-dimensional arrays can be accessed in a similar way by providing an index or slice for each axis, separated by commas:
###Code
b = np.arange(48).reshape(4, 12)
b
b[1, 2] # row 1, col 2
b[1, :] # row 1, all columns
b[:, 1] # all rows, column 1
###Output
_____no_output_____
###Markdown
**Caution**: note the subtle difference between these two expressions:
###Code
b[1, :]
b[1:2, :]
###Output
_____no_output_____
###Markdown
The first expression returns row 1 as a 1D array of shape `(12,)`, while the second returns that same row as a 2D array of shape `(1, 12)`. Fancy indexingYou may also specify a list of indices that you are interested in. This is referred to as *fancy indexing*.
###Code
b[(0,2), 2:5] # rows 0 and 2, columns 2 to 4 (5-1)
b[:, (-1, 2, -1)] # all rows, columns -1 (last), 2 and -1 (again, and in this order)
###Output
_____no_output_____
###Markdown
If you provide multiple index arrays, you get a 1D `ndarray` containing the values of the elements at the specified coordinates.
###Code
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
###Output
_____no_output_____
###Markdown
Higher dimensionsEverything works just as well with higher dimensional arrays, but it's useful to look at a few examples:
###Code
c = b.reshape(4,2,6)
c
c[2, 1, 4] # matrix 2, row 1, col 4
c[2, :, 3] # matrix 2, all rows, col 3
###Output
_____no_output_____
###Markdown
If you omit coordinates for some axes, then all elements in these axes are returned:
###Code
c[2, 1] # Return matrix 2, row 1, all columns. This is equivalent to c[2, 1, :]
###Output
_____no_output_____
###Markdown
Ellipsis (`...`)You may also write an ellipsis (`...`) to ask that all non-specified axes be entirely included.
###Code
c[2, ...] # matrix 2, all rows, all columns. This is equivalent to c[2, :, :]
c[2, 1, ...] # matrix 2, row 1, all columns. This is equivalent to c[2, 1, :]
c[2, ..., 3] # matrix 2, all rows, column 3. This is equivalent to c[2, :, 3]
c[..., 3] # all matrices, all rows, column 3. This is equivalent to c[:, :, 3]
###Output
_____no_output_____
###Markdown
Boolean indexingYou can also provide an `ndarray` of boolean values on one axis to specify the indices that you want to access.
###Code
b = np.arange(48).reshape(4, 12)
b
rows_on = np.array([True, False, True, False])
b[rows_on, :] # Rows 0 and 2, all columns. Equivalent to b[(0, 2), :]
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # All rows, columns 1, 4, 7 and 10
###Output
_____no_output_____
###Markdown
`np.ix_`You cannot use boolean indexing this way on multiple axes, but you can work around this by using the `ix_` function:
###Code
b[np.ix_(rows_on, cols_on)]
np.ix_(rows_on, cols_on)
###Output
_____no_output_____
###Markdown
If you use a boolean array that has the same shape as the `ndarray`, then you get in return a 1D array containing all the values that have `True` at their coordinate. This is generally used along with conditional operators:
###Code
b[b % 3 == 1]
###Output
_____no_output_____
###Markdown
IteratingIterating over `ndarray`s is very similar to iterating over regular python arrays. Note that iterating over multidimensional arrays is done with respect to the first axis.
###Code
c = np.arange(24).reshape(2, 3, 4) # A 3D array (composed of two 3x4 matrices)
c
for m in c:
print("Item:")
print(m)
for i in range(len(c)): # Note that len(c) == c.shape[0]
print("Item:")
print(c[i])
###Output
Item:
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
Item:
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]
###Markdown
If you want to iterate on *all* elements in the `ndarray`, simply iterate over the `flat` attribute:
###Code
for i in c.flat:
print("Item:", i)
###Output
Item: 0
Item: 1
Item: 2
Item: 3
Item: 4
Item: 5
Item: 6
Item: 7
Item: 8
Item: 9
Item: 10
Item: 11
Item: 12
Item: 13
Item: 14
Item: 15
Item: 16
Item: 17
Item: 18
Item: 19
Item: 20
Item: 21
Item: 22
Item: 23
###Markdown
Stacking arraysIt is often useful to stack together different arrays. NumPy offers several functions to do just that. Let's start by creating a few arrays.
###Code
q1 = np.full((3,4), 1.0)
q1
q2 = np.full((4,4), 2.0)
q2
q3 = np.full((3,4), 3.0)
q3
###Output
_____no_output_____
###Markdown
`vstack`Now let's stack them vertically using `vstack`:
###Code
q4 = np.vstack((q1, q2, q3))
q4
q4.shape
###Output
_____no_output_____
###Markdown
This was possible because q1, q2 and q3 all have the same shape (except for the vertical axis, but that's ok since we are stacking on that axis). `hstack`We can also stack arrays horizontally using `hstack`:
###Code
q5 = np.hstack((q1, q3))
q5
q5.shape
###Output
_____no_output_____
###Markdown
This is possible because q1 and q3 both have 3 rows. But since q2 has 4 rows, it cannot be stacked horizontally with q1 and q3:
###Code
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
###Output
all the input array dimensions except for the concatenation axis must match exactly
###Markdown
`concatenate`The `concatenate` function stacks arrays along any given existing axis.
###Code
q7 = np.concatenate((q1, q2, q3), axis=0) # Equivalent to vstack
q7
q7.shape
###Output
_____no_output_____
###Markdown
As you might guess, `hstack` is equivalent to calling `concatenate` with `axis=1`. `stack`The `stack` function stacks arrays along a new axis. All arrays have to have the same shape.
###Code
q8 = np.stack((q1, q3))
q8
q8.shape
###Output
_____no_output_____
###Markdown
Splitting arraysSplitting is the opposite of stacking. For example, let's use the `vsplit` function to split a matrix vertically.First let's create a 6x4 matrix:
###Code
r = np.arange(24).reshape(6,4)
r
###Output
_____no_output_____
###Markdown
Now let's split it in three equal parts, vertically:
###Code
r1, r2, r3 = np.vsplit(r, 3)
r1
r2
r3
###Output
_____no_output_____
###Markdown
There is also a `split` function which splits an array along any given axis. Calling `vsplit` is equivalent to calling `split` with `axis=0`. There is also an `hsplit` function, equivalent to calling `split` with `axis=1`:
###Code
r4, r5 = np.hsplit(r, 2)
r4
r5
###Output
_____no_output_____
###Markdown
Transposing arraysThe `transpose` method creates a new view on an `ndarray`'s data, with axes permuted in the given order.For example, let's create a 3D array:
###Code
t = np.arange(24).reshape(4,2,3)
t
###Output
_____no_output_____
###Markdown
Now let's create an `ndarray` such that the axes `0, 1, 2` (depth, height, width) are re-ordered to `1, 2, 0` (depth→width, height→depth, width→height):
###Code
t1 = t.transpose((1,2,0))
t1
t1.shape
###Output
_____no_output_____
###Markdown
By default, `transpose` reverses the order of the dimensions:
###Code
t2 = t.transpose() # equivalent to t.transpose((2, 1, 0))
t2
t2.shape
###Output
_____no_output_____
###Markdown
NumPy provides a convenience function `swapaxes` to swap two axes. For example, let's create a new view of `t` with depth and height swapped:
###Code
t3 = t.swapaxes(0,1) # equivalent to t.transpose((1, 0, 2))
t3
t3.shape
###Output
_____no_output_____
###Markdown
Linear algebraNumPy 2D arrays can be used to represent matrices efficiently in python. We will just quickly go through some of the main matrix operations available. For more details about Linear Algebra, vectors and matrices, go through the [Linear Algebra tutorial](math_linear_algebra.ipynb). Matrix transposeThe `T` attribute is equivalent to calling `transpose()` when the rank is ≥2:
###Code
m1 = np.arange(10).reshape(2,5)
m1
m1.T
###Output
_____no_output_____
###Markdown
The `T` attribute has no effect on rank 0 (scalar) or rank 1 arrays:
###Code
m2 = np.arange(5)
m2
m2.T
###Output
_____no_output_____
###Markdown
We can get the desired transposition by first reshaping the 1D array to a single-row matrix (2D):
###Code
m2r = m2.reshape(1,5)
m2r
m2r.T
###Output
_____no_output_____
###Markdown
Matrix dot productLet's create two matrices and execute a matrix [dot product](https://en.wikipedia.org/wiki/Dot_product) using the `dot` method.
###Code
n1 = np.arange(10).reshape(2, 5)
n1
n2 = np.arange(15).reshape(5,3)
n2
n1.dot(n2)
###Output
_____no_output_____
###Markdown
**Caution**: as mentioned previously, `n1*n2` is *not* a dot product, it is an elementwise product. Matrix inverse and pseudo-inverseMany of the linear algebra functions are available in the `numpy.linalg` module, in particular the `inv` function to compute a square matrix's inverse:
###Code
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
linalg.inv(m3)
###Output
_____no_output_____
###Markdown
You can also compute the [pseudoinverse](https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse) using `pinv`:
###Code
linalg.pinv(m3)
###Output
_____no_output_____
###Markdown
Identity matrixThe product of a matrix by its inverse returns the identity matrix (with small floating point errors):
###Code
m3.dot(linalg.inv(m3))
###Output
_____no_output_____
###Markdown
You can create an identity matrix of size NxN by calling `eye`:
###Code
np.eye(3)
###Output
_____no_output_____
###Markdown
QR decompositionThe `qr` function computes the [QR decomposition](https://en.wikipedia.org/wiki/QR_decomposition) of a matrix:
###Code
q, r = linalg.qr(m3)
q
r
q.dot(r) # q.r equals m3
###Output
_____no_output_____
###Markdown
DeterminantThe `det` function computes the [matrix determinant](https://en.wikipedia.org/wiki/Determinant):
###Code
linalg.det(m3) # Computes the matrix determinant
###Output
_____no_output_____
###Markdown
Eigenvalues and eigenvectorsThe `eig` function computes the [eigenvalues and eigenvectors](https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors) of a square matrix:
###Code
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
eigenvectors # v
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
###Output
_____no_output_____
###Markdown
Singular Value DecompositionThe `svd` function takes a matrix and returns its [singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition):
###Code
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
U, S_diag, V = linalg.svd(m4)
U
S_diag
###Output
_____no_output_____
###Markdown
The `svd` function just returns the values in the diagonal of Σ, but we want the full Σ matrix, so let's create it:
###Code
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
V
U.dot(S).dot(V) # U.Σ.V == m4
###Output
_____no_output_____
###Markdown
Diagonal and trace
###Code
np.diag(m3) # the values in the diagonal of m3 (top left to bottom right)
np.trace(m3) # equivalent to np.diag(m3).sum()
###Output
_____no_output_____
###Markdown
Solving a system of linear scalar equations The `solve` function solves a system of linear scalar equations, such as:* $2x + 6y = 6$* $5x + 3y = -9$
###Code
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
###Output
_____no_output_____
###Markdown
Let's check the solution:
###Code
coeffs.dot(solution), depvars # yep, it's the same
###Output
_____no_output_____
###Markdown
Looks good! Another way to check the solution:
###Code
np.allclose(coeffs.dot(solution), depvars)
###Output
_____no_output_____
###Markdown
VectorizationInstead of executing operations on individual array items, one at a time, your code is much more efficient if you try to stick to array operations. This is called *vectorization*. This way, you can benefit from NumPy's many optimizations.For example, let's say we want to generate a 768x1024 array based on the formula $sin(xy/40.5)$. A **bad** option would be to do the math in python using nested loops:
###Code
import math
data = np.empty((768, 1024))
for y in range(768):
for x in range(1024):
data[y, x] = math.sin(x*y/40.5) # BAD! Very inefficient.
###Output
_____no_output_____
###Markdown
Sure, this works, but it's terribly inefficient since the loops are taking place in pure python. Let's vectorize this algorithm. First, we will use NumPy's `meshgrid` function which generates coordinate matrices from coordinate vectors.
###Code
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
Y
###Output
_____no_output_____
###Markdown
As you can see, both `X` and `Y` are 768x1024 arrays, and all values in `X` correspond to the horizontal coordinate, while all values in `Y` correspond to the vertical coordinate.Now we can simply compute the result using array operations:
###Code
data = np.sin(X*Y/40.5)
###Output
_____no_output_____
###Markdown
Now we can plot this data using matplotlib's `imshow` function (see the [matplotlib tutorial](tools_matplotlib.ipynb)).
###Code
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot, interpolation="bicubic")
plt.show()
###Output
_____no_output_____
###Markdown
Saving and loadingNumPy makes it easy to save and load `ndarray`s in binary or text format. Binary `.npy` formatLet's create a random array and save it.
###Code
a = np.random.rand(2,3)
a
np.save("my_array", a)
###Output
_____no_output_____
###Markdown
Done! Since no file extension was provided, NumPy automatically added `.npy`. Let's take a peek at the file content:
###Code
with open("my_array.npy", "rb") as f:
content = f.read()
content
###Output
_____no_output_____
###Markdown
To load this file into a NumPy array, simply call `load`:
###Code
a_loaded = np.load("my_array.npy")
a_loaded
###Output
_____no_output_____
###Markdown
Text formatLet's try saving the array in text format:
###Code
np.savetxt("my_array.csv", a)
###Output
_____no_output_____
###Markdown
Now let's look at the file content:
###Code
with open("my_array.csv", "rt") as f:
print(f.read())
###Output
4.130797191668116319e-01 2.093338525574361952e-01 3.202558143634371968e-01
1.985351449843368865e-01 4.080009972772735694e-01 6.038286965726977762e-01
###Markdown
This is a plain-text file that uses spaces as the default delimiter. You can set a different delimiter:
###Code
np.savetxt("my_array.csv", a, delimiter=",")
###Output
_____no_output_____
###Markdown
To load this file, just use `loadtxt`:
###Code
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
###Output
_____no_output_____
###Markdown
Zipped `.npz` formatIt is also possible to save multiple arrays in one zipped file:
###Code
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
np.savez("my_arrays", my_a=a, my_b=b)
###Output
_____no_output_____
###Markdown
Again, let's take a peek at the file content. Note that the `.npz` file extension was automatically added.
###Code
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
###Output
_____no_output_____
###Markdown
You then load this file like so:
###Code
my_arrays = np.load("my_arrays.npz")
my_arrays
###Output
_____no_output_____
###Markdown
This is a dict-like object which loads the arrays lazily:
###Code
my_arrays.keys()
my_arrays["my_a"]
###Output
_____no_output_____
###Markdown
Basic concepts: the core of NumPy is the contiguous multidimensional array. Arrays in NumPy are called ```np.ndarray```, and the alias ```np.array``` can also be used; note that this `np.array` is different from `array.array` in the Python standard library. A few important ```ndarray``` attributes: **ndarray.ndim** the number of dimensions of the array. **ndarray.shape** the shape of the array. **ndarray.size** the number of elements. **ndarray.dtype** the element type. Example:
###Code
import numpy as np
data=np.arange(15).reshape(3,5)
print(data)
print(data.shape)
print(data.ndim)
print(data.size)
print(data.dtype.name)
###Output
[[ 0 1 2 3 4]
[ 5 6 7 8 9]
[10 11 12 13 14]]
(3, 5)
2
15
int32
###Markdown
Creating arrays: there are several ways to create arrays. You can use ```np.array``` directly with Python tuples and lists.
###Code
import numpy as np
a=np.array([1,2,3])
print(a.dtype)
b=np.array([1.1,2.2,3.3])
print(b.dtype)
c=np.array([(1,2,3),(4.5,5,6)]) # create a 2-D array
print(c)
d=np.array([(1,2),(3,4)],dtype=complex) # the element type can be declared explicitly at creation
print(d)
###Output
int32
float64
[[ 1. 2. 3. ]
[ 4.5 5. 6. ]]
[[ 1.+0.j 2.+0.j]
[ 3.+0.j 4.+0.j]]
###Markdown
Often the elements of an array are unknown while its shape is already known, so NumPy provides several ways to create placeholder arrays. ```np.zeros``` creates an array filled with 0. ```np.ones``` creates an array filled with 1. ```np.empty``` creates an array whose initial content is arbitrary. Note that the arrays created by these functions have element type ```float64``` by default.
###Code
e=np.zeros((3,4))
print(e)
f=np.ones((2,3,4),dtype=np.int16) # the data type can be changed
print(f)
g=np.empty((2,3))
print(g)
###Output
[[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]
[ 0. 0. 0. 0.]]
[[[1 1 1 1]
[1 1 1 1]
[1 1 1 1]]
[[1 1 1 1]
[1 1 1 1]
[1 1 1 1]]]
[[ 1. 2. 3. ]
[ 4.5 5. 6. ]]
###Markdown
To create sequences of numbers, NumPy provides a function analogous to ```range```: ```np.arange(start,end,step)```
###Code
a=np.arange(10,30,5)
print(type(a))
b=np.arange(0,2,0.3) # it also accepts float arguments
print(b)
###Output
<class 'numpy.ndarray'>
[0. 0.3 0.6 0.9 1.2 1.5 1.8]
###Markdown
When generating sequences of floats, it is better not to use ```np.arange``` but ```np.linspace``` instead: ```np.linspace(start,stop,num)```
###Code
np.linspace(0,2,9)
###Output
_____no_output_____
###Markdown
Printing arrays: when you print an array, NumPy displays it much like nested lists, but with the following layout: the last axis is printed from left to right; the second-to-last axis is printed from top to bottom; the remaining axes are also printed from top to bottom, with each slice separated by an empty line. One-dimensional arrays are printed as rows, two-dimensional arrays as matrices, and three-dimensional arrays as lists of matrices.
###Code
# a=np.arange(6)
# print(a)
# b=np.arange(12).reshape(4,3)
# print(b)
c=np.arange(24).reshape(2,3,4)
print(c)
###Output
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
###Markdown
When an array has too many elements to display conveniently, NumPy automatically skips the central part and only prints the corners.
###Code
print(np.arange(10000))
###Output
[ 0 1 2 ..., 9997 9998 9999]
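If the full array really is needed, the printing threshold can be raised with `np.set_printoptions` (an added illustrative sketch, not part of the original tutorial):

```python
import sys
import numpy as np

np.set_printoptions(threshold=sys.maxsize)  # print every element
print(np.arange(10000))
np.set_printoptions(threshold=1000)         # restore a finite threshold
```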
###Markdown
Basic operations: arithmetic on arrays is applied element-wise. The result is stored in a newly created array.
###Code
import numpy as np
a=np.array([20,30,40,50])
b=np.arange(4)
print(b)
c=a-b
print(c)
print(b**2)
print(10*np.sin(a))
print(a<35)
###Output
[0 1 2 3]
[20 29 38 47]
[0 1 4 9]
[ 9.12945251 -9.88031624 7.4511316 -2.62374854]
[ True True False False]
###Markdown
In NumPy the ```*``` operator still means element-wise multiplication; the matrix product is computed with ```np.dot```.
###Code
A=np.array([(1,1),(0,1)])
B=np.array([(2,0),(3,4)])
print(A*B)
print(A.dot(B))
print(np.dot(A,B))
###Output
[[2 0]
[0 4]]
[[5 4]
[3 4]]
[[5 4]
[3 4]]
###Markdown
Operations such as ```+=``` and ```*=``` act in place on the existing array rather than creating a new one. Computations in NumPy also upcast: roughly speaking, the result of combining floats and integers is a float.
###Code
a = np.ones((2,3), dtype=int)
b = np.random.random((2,3))
a*=3
print(a)
b += a
print(b)
# a += b # would fail: floats are not automatically converted down to integers
###Output
[[3 3 3]
[3 3 3]]
[[ 3.36167598 3.63342297 3.22543331]
[ 3.17992397 3.01462584 3.87847828]]
###Markdown
```np.ndarray``` provides many unary operations, such as summing the array or finding its maximum and minimum.
###Code
a=np.random.random((2,3))
print(a)
print(a.sum())
print(a.mean())
print(a.max())
print(a.min())
###Output
[[ 0.06108727 0.21625055 0.066292 ]
[ 0.20271722 0.93946432 0.37747181]]
1.86328317161
0.310547195269
0.939464322779
0.0610872663968
###Markdown
By default these unary operations treat the array as a flat list of numbers, regardless of its shape. You can set the ```axis``` parameter to apply the operation along a given axis; ```axis``` refers to the n-th dimension (counting from 0).
###Code
b=np.arange(12).reshape(3,4)
print(b);
print(b.sum(axis=0)) # sum of the elements along axis 0
print(b.sum(axis=1)) # sum of the elements along axis 1
print(b.min(axis=1))
print(b.cumsum(axis=1)) # cumulative sum along axis 1
###Output
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
[12 15 18 21]
[ 6 22 38]
[0 4 8]
[[ 0 1 3 6]
[ 4 9 15 22]
[ 8 17 27 38]]
###Markdown
Universal functions: NumPy provides the familiar mathematical functions such as sin, cos and exp. In NumPy these are called universal functions (ufuncs). They operate element-wise on an array and return a new array with the results.
###Code
B=np.arange(3)
print(B)
print(np.exp(B))
print(np.sqrt(B))
C=np.array([2,-1,4])
print(np.add(B,C))
print(B+C)
###Output
[0 1 2]
[ 1. 2.71828183 7.3890561 ]
[ 0. 1. 1.41421356]
[2 0 6]
[2 0 6]
###Markdown
Indexing, slicing and iterating: one-dimensional arrays can be indexed, sliced and iterated over, just like Python lists.
###Code
a=np.arange(10)**3;
print(a)
# print(a[2])
# print(a[2:5])
a[:6:2]=-1000
print(a)
print(a[::-1])
for i in a:
print(i)
###Output
[ 0 1 8 27 64 125 216 343 512 729]
[-1000 1 -1000 27 -1000 125 216 343 512 729]
[ 729 512 343 216 125 -1000 27 -1000 1 -1000]
-1000
1
-1000
27
-1000
125
216
343
512
729
###Markdown
Multidimensional arrays can have one index per axis; these indices are given as a tuple.
###Code
def f(x,y):
return 10*x+y
b=np.fromfunction(f,(5,4),dtype=int)
print(b)
print(b[2,3])
print(b[0:5,1])
print(b[:,1])
print(b[1:3,:])
###Output
[[ 0 1 2 3]
[10 11 12 13]
[20 21 22 23]
[30 31 32 33]
[40 41 42 43]]
23
[ 1 11 21 31 41]
[ 1 11 21 31 41]
[[10 11 12 13]
[20 21 22 23]]
###Markdown
```...``` stands for the omitted indices, as shown below:
###Code
c = np.array( [[[ 0, 1, 2],  # a 3-D array
                [ 10, 12, 13]],
               [[100,101,102],
                [110,112,113]]])
print(c.shape)
print(c[1,...]) # same as c[1,:,:] and c[1]
print(c[...,2]) # same as c[:,:,2]
# Iterating over a multidimensional array is done with respect to the first axis:
for row in b:
print(row)
###Output
[0 1 2 3]
[10 11 12 13]
[20 21 22 23]
[30 31 32 33]
[40 41 42 43]
###Markdown
If you need to iterate over all elements of a multidimensional array, you can use the ```flat``` attribute.
###Code
for element in b.flat:
print(element)
###Output
0
1
2
3
10
11
12
13
20
21
22
23
30
31
32
33
40
41
42
43
###Markdown
Shape manipulation: there are many ways to change the shape of an array. The following functions do not modify the original array; they return a new, reshaped array. If one dimension is given as -1, its size is computed automatically.
###Code
print(a.reshape(3,-1))
###Output
[[ 5. 0. 9. 5.]
[ 5. 4. 2. 2.]
[ 3. 2. 0. 7.]]
###Markdown
Stacking different arrays: several arrays can be stacked together along different axes.
###Code
a=np.floor(10*np.random.random((2,2)))
print(a)
b=np.floor(10*np.random.random((2,2)))
print(b)
print(np.vstack((a,b))) # stack vertically
print(np.hstack((a,b))) # stack horizontally
from numpy import newaxis
print(a[:,newaxis])
###Output
[[7. 6.]
[7. 4.]]
[[9. 5.]
[4. 9.]]
[[7. 6.]
[7. 4.]
[9. 5.]
[4. 9.]]
[[7. 6. 9. 5.]
[7. 4. 4. 9.]]
[[[7. 6.]]
[[7. 4.]]]
###Markdown
Splitting one array into several smaller ones: ```hsplit``` and ```vsplit``` split an array along its horizontal and vertical directions.
###Code
a=np.floor(10*np.random.random((2,12)))
print(a)
print(np.hsplit(a,3))
print(np.hsplit(a,(1,2,3))) # split the array after the first, second and third columns
###Output
[[ 7. 4. 0. 7. 5. 6. 4. 4. 4. 7. 7. 0.]
[ 0. 1. 7. 7. 4. 9. 7. 0. 0. 2. 7. 5.]]
[array([[ 7., 4., 0., 7.],
[ 0., 1., 7., 7.]]), array([[ 5., 6., 4., 4.],
[ 4., 9., 7., 0.]]), array([[ 4., 7., 7., 0.],
[ 0., 2., 7., 5.]])]
[array([[ 7.],
[ 0.]]), array([[ 4.],
[ 1.]]), array([[ 0.],
[ 7.]]), array([[ 7., 5., 6., 4., 4., 4., 7., 7., 0.],
[ 7., 4., 9., 7., 0., 0., 2., 7., 5.]])]
###Markdown
Copies and views: when operating on arrays, their data is sometimes copied into a new array and sometimes not. This is often confusing for beginners. In short, there are three cases. No copy at all: simple assignment does not copy the array's data.
###Code
a=np.arange(12)
b=a
print(b is a)
b.shape=3,4
print(a.shape)
###Output
True
(3, 4)
###Markdown
Views and shallow copies: different array objects can share the same data. The ```view``` function creates a new array object that looks at the same data.
###Code
c=a.view()
print(c is a)
print(c.base is a) # c is a view of a's data
print(c.flags.owndata)
c.shape=6,2
print(a.shape) # a's shape does not change
c[4,1]=1234 # a's data does change
print(a)
###Output
False
True
False
(3, 4)
[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 1234 10 11]]
###Markdown
Slicing an array returns a view of it.
###Code
print(a)
s=a[0:2,0:2]
print(s)
s[:]=10
print(s.base is a)
print(a)
###Output
[[10. 10.]
[10. 10.]]
[[10. 10.]
[10. 10.]]
True
[[10. 10.]
[10. 10.]]
###Markdown
Deep copy: the ```copy``` function makes a complete copy of the array and its data.
###Code
d=a.copy()
print(d is a)
print(d.base is a)
d[0,0]=9999
print(a)
###Output
False
False
[[ 0 10 10 3]
[ 4 10 10 7]
[ 8 10 10 11]]
###Markdown
Fancy indexing and index tricks: compared with Python lists, NumPy offers more indexing facilities. In addition to indexing by integers and slices, arrays can be indexed by arrays of integers and arrays of booleans. Indexing with arrays of indices:
###Code
a=np.arange(12)**2
print(a)
i=np.array([1,1,3,8,5])
print(a[i])
j=np.array([[3,4],[8,5]]) # index with a 2-D array of indices
print(a[j]) # the result has the same shape as the index array
###Output
[ 0 1 4 9 16 25 36 49 64 81 100 121]
[ 1 1 9 64 25]
[[ 9 16]
[64 25]]
###Markdown
A common use of array indexing is the search for the maxima of time-series data.
###Code
time=np.linspace(20,145,5)
data=np.sin(np.arange(20)).reshape(5,4)
print(time)
print(data)
ind=data.argmax(axis=0) # indices of the maxima along the given axis
time_max=time[ind]
print(ind)
print(time_max)
data_max=data[ind,range(data.shape[1])]
print(data_max)
###Output
[ 20. 51.25 82.5 113.75 145. ]
[[ 0. 0.84147098 0.90929743 0.14112001]
[-0.7568025 -0.95892427 -0.2794155 0.6569866 ]
[ 0.98935825 0.41211849 -0.54402111 -0.99999021]
[-0.53657292 0.42016704 0.99060736 0.65028784]
[-0.28790332 -0.96139749 -0.75098725 0.14987721]]
[2 0 3 1]
[ 82.5 20. 113.75 51.25]
[0.98935825 0.84147098 0.99060736 0.6569866 ]
###Markdown
You can also use indexing with arrays as a target to assign to.
###Code
a=np.arange(5)
a[[1,3,4]]=0
print(a)
###Output
[0 0 2 0 0]
###Markdown
When the list of indices contains repetitions, the assignment is performed several times and only the result of the last one is kept.
###Code
a=np.arange(5)
a[[0,0,0]]=[1,2,3]
print(a)
###Output
[3 1 2 3 4]
###Markdown
However, when the assignment uses ```+=```, the addition is not repeated for duplicate indices.
###Code
a=np.arange(5)
a[[0,0,0]]+=1
print(a)
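# Added aside (illustrative, not from the original tutorial): to make repeated
# indices accumulate instead of being applied only once, NumPy provides the
# unbuffered ufunc method np.add.at:
a2 = np.arange(5)
np.add.at(a2, [0, 0, 0], 1)
print(a2)  # [3 1 2 3 4]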
###Output
[1 1 2 3 4]
###Markdown
This is because ```a += 1``` is ultimately interpreted as ```a = a + 1```. Indexing with boolean arrays: by using boolean array indices we can explicitly choose which items we want and which we do not. This is also very useful in assignments.
###Code
a = np.arange(12).reshape(3,4)
b = a > 4
print(b)
print(a[b])
a[b]=10
print(a)
###Output
[[False False False False]
[False True True True]
[ True True True True]]
[ 5 6 7 8 9 10 11]
[[ 0 1 2 3]
[ 4 10 10 10]
[10 10 10 10]]
###Markdown
The following code uses boolean-array indexing to produce an image of the Mandelbrot set.
###Code
import numpy as np
import matplotlib.pyplot as plt
def mandelbrot( h,w, maxit=20 ):
"""Returns an image of the Mandelbrot fractal of size (h,w)."""
y,x = np.ogrid[ -1.4:1.4:h*1j, -2:0.8:w*1j ]
c = x+y*1j
z = c
divtime = maxit + np.zeros(z.shape, dtype=int)
for i in range(maxit):
z = z**2 + c
diverge = z*np.conj(z) > 2**2 # who is diverging
div_now = diverge & (divtime==maxit) # who is diverging now
divtime[div_now] = i # note when
z[diverge] = 2 # avoid diverging too much
return divtime
plt.imshow(mandelbrot(400,400))
plt.show()
###Output
_____no_output_____
###Markdown
The `ix_()` function: `ix_` can be used to combine different vectors so that an operation (here, a product) is evaluated for every combination of their elements.
###Code
a = np.array([2,3,4,5])
b = np.array([8,5,4])
c = np.array([5,4,6,8,3])
ax,bx,cx = np.ix_(a,b,c)
print(ax)
print(bx)
print(cx)
print(ax.shape, bx.shape, cx.shape)
result = ax*bx*cx + ax
print(result)
print(result[3,2,4])
print(a[3]*b[2]*c[4]+a[3]) # the computed results are the same
###Output
[[[2]]
[[3]]
[[4]]
[[5]]]
[[[8]
[5]
[4]]]
[[[5 4 6 8 3]]]
(4, 1, 1) (1, 3, 1) (1, 1, 5)
[[[ 82 66 98 130 50]
[ 52 42 62 82 32]
[ 42 34 50 66 26]]
[[123 99 147 195 75]
[ 78 63 93 123 48]
[ 63 51 75 99 39]]
[[164 132 196 260 100]
[104 84 124 164 64]
[ 84 68 100 132 52]]
[[205 165 245 325 125]
[130 105 155 205 80]
[105 85 125 165 65]]]
65
65
###Markdown
Linear algebra: basic linear-algebra operations are provided. Simple array operations:
###Code
import numpy as np
a = np.array([[1.0, 2.0], [3.0, 4.0]])
print(a)
a.transpose()
np.linalg.inv(a)
u = np.eye(2) # unit 2x2 matrix; "eye" represents "I"
j = np.array([[0.0, -1.0], [1.0, 0.0]])
np.dot (j, j) # dot product
np.trace(u) # trace of the matrix
y = np.array([[5.], [7.]])
print(np.linalg.solve(a, y)) # solve a system of linear equations
print(np.linalg.eig(j)) # compute eigenvalues and eigenvectors
###Output
[[ 1. 2.]
[ 3. 4.]]
[[-3.]
[ 4.]]
(array([ 0.+1.j, 0.-1.j]), array([[ 0.70710678+0.j , 0.70710678-0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]))
###Markdown
Tricks and tips. Automatic reshaping: when changing the shape of an array, you can omit one dimension and it will be deduced automatically.
###Code
a = np.arange(30)
a.shape = 2,-1,3 # -1 means "compute this size automatically"
print(a.shape)
###Output
(2, 5, 3)
###Markdown
Histograms
###Code
import numpy as np
import matplotlib.pyplot as plt
mu,sigma=2,0.5
v=np.random.normal(mu,sigma,10000)
# matplotlib version
plt.hist(v,bins=100,normed=1)
plt.show()
# NumPy version
(n, bins) = np.histogram(v, bins=50, normed=True) # NumPy version (no plot)
plt.plot(.5*(bins[1:]+bins[:-1]), n)
plt.show()
###Output
_____no_output_____
###Markdown
In `np.linspace`, the first argument is the starting point, the second is the ending point, and the third is the number of evenly spaced items between them, as shown in the example.
###Code
np.linspace(0,50,20)
np.eye(4) # creates an identity matrix; the argument sets the number of rows (and of columns, which is the same)
np.random.rand(5) # draws a random sample from the uniform distribution over [0, 1)
np.random.rand(5,5)
np.random.randn(2) # draws from the standard normal distribution instead of the uniform one
np.random.randn(4,4)
np.random.randint(1,100)
###Output
_____no_output_____
###Markdown
For `np.random.randint`, the first argument is the (inclusive) low bound, the second is the (exclusive) high bound, and the optional third argument is the number of random integers to draw (1 by default).
###Code
np.random.randint(100,1000,5)
arr = np.arange(25)
arr
ranarr = np.random.randint(0,50,10)
ranarr
arr.reshape(5,5) # returns the same data with a new shape, e.g. from a 1-D vector to a 5x5 matrix
arr.reshape(5,10)
###Output
_____no_output_____
###Markdown
There are not enough elements for this reshape: `arr` has only 25 elements, so reshaping it to 5x10 (50 elements) raises a ValueError.
###Code
ranarr
ranarr.max()
ranarr.min()
ranarr.argmax() # index of the maximun value
ranarr.argmin() # index of the minimum value
arr.shape
###Output
_____no_output_____
###Markdown
Here the shape `(25,)` represents a one-dimensional vector.
###Code
arr = arr.reshape(5,5)
arr.shape
###Output
_____no_output_____
###Markdown
This shape `(5, 5)` represents the two-dimensional array.
###Code
arr.dtype
###Output
_____no_output_____
###Markdown
This means the data type is a 32-bit integer. NUMPY INDEXING AND SELECTION
###Code
arr = np.arange(0,11)
arr
arr[8] #this is called indexing
arr[1:5] # this is called slicing
arr[:6]
arr[5:]
arr[0:5] = 100 # this will broadcast the value. see the example
arr
arr = np.arange(0,11)
arr
slice_of_arr = arr[0:6]
slice_of_arr
slice_of_arr[:] = 99
slice_of_arr
arr
arr_copy = arr.copy()
arr_copy
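# Added note: arr_copy owns its own data, so changing it does not touch arr --
# unlike the slice above, which was a view into arr's buffer.
arr_copy[0] = -1
print(arr_copy[0], arr[0])  # -1 99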
arr_2d = np.array([[5,10,15],[20,25,30],[35,40,45]])
arr_2d
arr_2d[0][0] # the first index selects the row and the second the column
arr_2d[0]
arr_2d[1][1]
arr_2d[2][1]
arr_2d[2,1] # here comma replaces the double bracket notation to single bracket notation
arr_2d[:2,1:]
arr_2d
arr_2d[1:,1:]
###Output
_____no_output_____
###Markdown
CONDITIONAL SELECTION
###Code
arr = np.arange(1,11)
arr
arr > 5
bool_arr = arr > 5
bool_arr
arr[bool_arr]
arr[arr>5]
arr[arr<3]
arr_2d = np.arange(50).reshape(5,10)
arr_2d
arr_2d[1:3,3:5]
arr = np.arange(0,11)
arr
arr + arr
arr - arr
arr * arr
arr + 100
arr / arr
1 / arr # inf means infinity
arr ** arr
np.sqrt(arr)
np.exp(arr)
np.max(arr)
np.sin(arr)
np.log(arr)
###Output
E:\Anaconda3.5\lib\site-packages\ipykernel_launcher.py:1: RuntimeWarning: divide by zero encountered in log
"""Entry point for launching an IPython kernel.
###Markdown
FOR THE FULL LIST OF UNIVERSAL ARRAY FUNCTIONS (UFUNCS), SEE --- docs.scipy.org/doc/numpy/reference/ufuncs.html
###Code
np.sum(arr)
###Output
_____no_output_____
###Markdown
NUMPY EXERCISE
###Code
np.zeros(10)
np.ones(10)
fives = np.ones(10)
fives[0:11] = 5
fives
np.arange(10,51)
np.arange(10,51,2)
np.arange(0,9).reshape(3,3)
np.eye(3)
np.random.rand(1)
np.random.randn(25)
np.linspace(0.01,1,100).reshape(10,10)
np.linspace(0,1,20)
num = np.arange(1,26).reshape(5,5)
num
num[2:,1:]
num[3,4]
num[:3,1:2]
num[4:5]
num[3:5]
np.sum(num)
np.std(num)
num.sum(axis=0)
###Output
_____no_output_____
notebooks/monte_carlo_dev/CompareOilTypeVolumeByVessel.ipynb | ###Markdown
Compare oil type volumes by vessel
###Code
import pandas
import numpy as np
import matplotlib.pyplot as plt
import yaml
from pathlib import Path
# import functions for querying DOE and monte-carlo dataframes
from monte_carlo_utils import get_montecarlo_oil_byfac, get_montecarlo_oil
from monte_carlo_utils import get_oil_classification, get_DOE_df
from monte_carlo_utils import get_DOE_quantity_byfac, get_DOE_quantity
#~~~~~ User inputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Dept. of Ecology data files
DOE_dir = Path('/Users/rmueller/Data/MIDOSS/DeptOfEcology/')
DOE_2018_xlsx = DOE_dir/'MuellerTrans4-30-20.xlsx'
DOE_2021_xlsx = DOE_dir/'MuellerTrans_5-26-21.xlsx'
# Facility names and lat/lon information file
facilities_xlsx = Path(
'/Users/rmueller/Data/MIDOSS/marine_transport_data/'
'Oil_Transfer_Facilities.xlsx'
)
# 10,000 monte carlo spills
monte_carlo_csv = Path(
'/Users/rmueller/Data/MIDOSS/monte_carlo/'
'SalishSeaOilSpills_fixbarge_10000_1.csv'
)
# Oil Attribution file
oil_attribution_file = Path(
'/Users/rmueller/Data/MIDOSS/marine_transport_data/'
'oil_attribution.yaml'
)
# location of output excel spreadsheets
output_directory = Path(
'/Users/rmueller/Data/MIDOSS/DeptOfEcology/'
)
graphics_directory = Path(
'/Users/rmueller/Projects/MIDOSS/graphics_figures/monte_carlo'
)
# location of output .tex documents for writing tables to file
tex_dir = Path(
'/Users/rmueller/Library/Mobile Documents/com~apple~CloudDocs/'
'Documents/Publications/MIDOSS/MIDOSS_MuellerEtAl_paper1/Methods/'
'notes/python_generated_tables')
data_types = ['total_gallons', 'fraction_of_total', 'number_of_transfers']
# oil types used in our study
oil_types = [
'ANS','Bunker-C','Diesel','Gasoline','Jet Fuel', 'Dilbit', 'Other'
]
oil_colors = [
'orange', 'saddlebrown','darkslateblue',
'steelblue','slateblue','olive',
'darkgoldenrod'
]
# create a color dictionary for oil types to us in pie charts
colordict={}
for l,c in zip(oil_types,oil_colors):
colordict[l]=c
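# (equivalent one-liner: colordict = dict(zip(oil_types, oil_colors)))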
# The precision used to calculate oil type weights
# Trial and error showed that a precision of 2 is necessary for weights
# to sum to 1.0
precision = 2
# unit conversions
gal2m3 = 0.00378541
###Output
_____no_output_____
###Markdown
Load data files
###Code
# Oil Attribution File
with open(oil_attribution_file) as file:
oil_attrs = yaml.load(file, Loader=yaml.Loader)
# Facility information
facdf = pandas.read_excel(
facilities_xlsx,
sheet_name = 'Washington'
)
# New method converts DOE facility names to monte-carlo facility names
# in `get_DOE_df` function of `monte_carlo_utils.py`, so I now use the
# monte_carlo names uniformly to query both DOE and monte-carlo facilities
facility_names = oil_attrs['categories']['US_origin_destination']
###Output
_____no_output_____
###Markdown
Check to make sure that I got the DOE facility names right
###Code
facdf.head(1)
# load DOE data such that the three terminals that are grouped in our
# origin-destination analysis are re-named to the terminal that they
# are grouped with in our analysis.
df = get_DOE_df(DOE_2018_xlsx, facilities_xlsx, group = 'yes')
# sort by deliverer and receiver
df_del = df.loc[
df.DelivererTypeDescription == 'Facility',
['Deliverer','TransferQtyInGallon']
].groupby('Deliverer').sum().sort_values(by='TransferQtyInGallon', ascending=False)
df_del.reset_index(inplace=True)
df_del = df_del.rename(columns={'Deliverer':'Facility_names'})
df_rec = df.loc[
df.ReceiverTypeDescription == 'Facility',
['Receiver','TransferQtyInGallon']
].groupby('Receiver').sum().sort_values(by='TransferQtyInGallon', ascending=False)
df_rec.reset_index(inplace=True)
df_rec = df_rec.rename(columns={'Receiver':'Facility_names'})
doe_del_facs = df_del['Facility_names'].to_list()
doe_rec_facs = df_rec['Facility_names'].to_list()
print(f'List of {len(doe_del_facs)} facilities that deliver cargo but do not receive cargo')
print('--------------------------------------------------------------')
length=0
for facility in doe_del_facs:
if facility not in doe_rec_facs:
print(facility)
length +=1
print('')
print(f'List of {len(doe_rec_facs)} facilities that receive cargo but do not deliver cargo')
print('--------------------------------------------------------------')
for facility in doe_rec_facs:
if facility not in doe_del_facs:
print(facility)
length+=1
# merge the two lists
all_facilities = pandas.merge(
left=pandas.DataFrame(doe_del_facs).rename(columns={0:'Facility_names'}),
right=pandas.DataFrame(doe_rec_facs).rename(columns={0:'Facility_names'}),
how='outer',
on='Facility_names'
)
all_facilities = pandas.merge(
left = all_facilities,
right = df_del,
how='left',
on = 'Facility_names'
)
all_facilities = pandas.merge(
left = all_facilities,
right = df_rec,
how='left',
on = 'Facility_names'
).fillna(0)
all_facilities = all_facilities.rename(
columns={
'TransferQtyInGallon_x':'TransferOutGallons',
'TransferQtyInGallon_y':'TransferInGallons'}
)
all_facilities['TransferTotalGallons'] = (
all_facilities['TransferOutGallons'] + \
all_facilities['TransferInGallons']
)
all_facilities['TransferPercent'] = (
100 * all_facilities['TransferTotalGallons']/\
all_facilities['TransferTotalGallons'].sum()
)
all_facilities.loc[
all_facilities['Facility_names'].isin(facility_names),
'In Monte Carlo?'] = 'yes'
all_facilities.loc[
~all_facilities['Facility_names'].isin(facility_names),
'In Monte Carlo?'] = '--'
percent_represented = all_facilities.loc[
all_facilities['In Monte Carlo?'] == 'yes',
['TransferPercent']
].sum()
print(f'{percent_represented.item():.2f}% of WA oil cargo transfers'
' occurs at the marine terminals represented in our study'
)
all_facilities.to_latex(buf=tex_dir/'monte_carlo.tex',float_format="%.2e",index=False)
# all_facilities.to_latex(buf=tex_dir/'monte_carlo.tex',
# formatters={0:'s',
# 1:'.2e',
# 2:'.2e',
# 3:'.2e',
# 4:'3.0f',
# 5:'s'},
# index=False
# )
all_facilities.sort_values(by='TransferTotalGallons', ascending=False)
missing_facilities=all_facilities.loc[
~all_facilities['Facility_names'].isin(facility_names)
].sort_values(by='TransferPercent', ascending=False)
missing_facilities.head()
###Output
_____no_output_____
###Markdown
Get DOE and monte-carlo attributions (both facility transfers and all transfers)
###Code
#--------------------------------------------------------------------------------
# Sum DOE oil transfers to/from facilities by oil and vessel types
#--------------------------------------------------------------------------------
print('Getting DOE volume transfers by Salish Sea facilities')
exports, imports, combined = get_DOE_quantity_byfac(
DOE_2018_xlsx, facilities_xlsx, facilities='selected'
)
#--------------------------------------------------------------------------------
# Sum all DOE oil transfers by oil and vessel types
#--------------------------------------------------------------------------------
print('Getting all DOE volume transfers for WA')
exports_all, imports_all, combined_all = get_DOE_quantity(DOE_2018_xlsx, facilities_xlsx)
#--------------------------------------------------------------------------------
# Sum monte-carlo tank capacities in spills file by vessel and oil types
# to estimate oil type traffic based only on transfers to/from marine facilities
#--------------------------------------------------------------------------------
mc_export = {}
mc_import = {}
mc_allUS = {}
for idx,vessel in enumerate(["tanker","atb","barge"]):
# calculate total cargo_capacity by vessel type and oil type
print(f'Getting monte-carlo {vessel} exports/imports to/from WA marine terminals')
mc_export[vessel], mc_import[vessel] = get_montecarlo_oil_byfac(
vessel,
monte_carlo_csv
)
# calculate total cargo_capacity by vessel type and oil type
print(f'Getting monte-carlo {vessel} representation of US oil transport')
mc_allUS[vessel] = get_montecarlo_oil(
vessel,
monte_carlo_csv
)
# Add entries for oil types that may not be in the monte-carlo file
# so that the DOE and monte-carlo information are in the same format.
# I intend to eventually move this into the monte_carlo_utils.py script;
# using a key-value pair for both information sources
# will ensure that I don't make an ordering mis-match mistake.
for vessel in ["tanker","atb","barge"]:
mc_export[vessel] = dict(mc_export[vessel])
mc_allUS[vessel] = dict(mc_allUS[vessel])
# Add oil types missing in US traffic
# e.g. Dilbit is missing in tanker, ANS is missing in ATBs
for oil in oil_types:
if oil not in mc_export[vessel].keys():
mc_export[vessel][oil] = 0.0
mc_allUS[vessel][oil] = 0.0
###Output
Getting DOE volume transfers by Salish Sea facilities
this code note yet tests with fac_xls as input
this code note yet tests with fac_xls as input
Tallying atb quantities
Tallying barge quantities
Tallying tanker quantities
Getting all DOE volume transfers for WA
this code note yet tests with fac_xls as input
Tallying atb quantities
Tallying barge quantities
Tallying tanker quantities
Getting monte-carlo tanker exports/imports to/from WA marine terminals
Getting monte-carlo tanker representation of US oil transport
Getting monte-carlo atb exports/imports to/from WA marine terminals
Getting monte-carlo atb representation of US oil transport
Getting monte-carlo barge exports/imports to/from WA marine terminals
Getting monte-carlo barge representation of US oil transport
###Markdown
Check: Is there dilbit in monte-carlo
###Code
mcdf = pandas.read_csv(monte_carlo_csv)
mcdf.groupby(
'Lagrangian_template'
).cargo_capacity.sum()
### Dilbit isn't showing up in terminal transfer plots (below). ID source(s)
dilbit = mcdf.loc[
(mcdf.Lagrangian_template == 'Lagrangian_dilbit.dat'),
['vessel_origin','vessel_dest']
]
dilbit.shape
dilbit
###Output
_____no_output_____
###Markdown
Create: Dictionary for percentage values and comparison
###Code
percent_oil_df = {}
percentages = {}
for idx,vessel in enumerate(["tanker", "atb", "barge"]):
percentages = {'DOE': [], 'monte-carlo': [], 'DOE_minus_monte-carlo': []}
percentages['DOE'] = [
100*exports[vessel][oil]/sum(exports[vessel].values()) for oil in exports[vessel].keys()
]
percentages['monte-carlo'] = [
100*mc_export[vessel][oil]/sum(mc_export[vessel].values()) for oil in exports[vessel].keys()
]
percentages['DOE_minus_monte-carlo'] = [
percentages['DOE'][idx] - percentages['monte-carlo'][idx] for idx in range(len(percentages['DOE']))
]
percent_oil_df[vessel] = pandas.DataFrame(
data=percentages,
index=exports[vessel].keys()
).rename_axis(index=f'{vessel} export')
def calc_percent_difference(DOE_dict, MC_dict):
"""
Inputs: dictionaries created by get_montecarlo_oil_byfac,
get_montecarlo_oil, get_DOE_quantity, get_DOE_quantity_byfac
Outputs: Dictionary organized by vessel type with columns corresponding to
oil_type percentages for DOE, monte-carlo csv and difference
"""
percent_oil_df = {}
percentages = {}
for idx,vessel in enumerate(["tanker", "atb", "barge"]):
percentages = {
'DOE': [],
'monte-carlo': [],
'DOE_minus_monte-carlo': []
}
percentages['DOE'] = [
100*DOE_dict[vessel][oil]/sum(DOE_dict[vessel].values())
for oil in DOE_dict[vessel].keys()
]
# note: I'm using the DOE dictionary to loop through oil types
# so that the order of the output lists are identical
percentages['monte-carlo'] = [
100*MC_dict[vessel][oil]/\
sum(MC_dict[vessel].values())
for oil in DOE_dict[vessel].keys()
]
percentages['DOE_minus_monte-carlo'] = [
percentages['DOE'][idx] - \
percentages['monte-carlo'][idx] \
for idx in range(len(percentages['DOE']))
]
percent_oil_df[vessel] = pandas.DataFrame(
data=percentages,
index=exports[vessel].keys()
).rename_axis(index=f'{vessel} export')
return percent_oil_df
###Output
_____no_output_____
###Markdown
--- Plot monte carlo and DOE representation of oil exports by vessel types---
###Code
#--------------------------------------------------------------------------------
# Plot monte-carlo representation of oil export
#--------------------------------------------------------------------------------
# Get cargo exports by vessel type and add up cargo_capacities by oil type
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge"]):
# add central title
axes[idx].axis('equal')
if idx==1:
axes[idx].set_title(
('Monte Carlo exports FROM TERMINALS according to \n'
'tank capacities and by oil type'),
fontsize=18
)
# plot up results
pie_wedge_collection = axes[idx].pie(
mc_export[vessel].values(),
labels = mc_export[vessel].keys(),
wedgeprops=dict(width=0.5),
textprops={'fontsize': 14}
)
# make colors uniform across subplots
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
axes[idx].axis('off')
axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'monte_carlo_oil_exports_v2')
#--------------------------------------------------------------------------------
## Plot department of ecology volume transfers from marine terminals by oil types
#--------------------------------------------------------------------------------
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge",]):
# add central title
axes[idx].axis('equal')
if idx==1:
axes[idx].set_title(
('DOE exports FROM TERMINALS according to \n'
'total gallons transferred to cargo vessels by oil type'),
fontsize=18
)
# plot up results
pie_wedge_collection = axes[idx].pie(
exports[vessel].values(),
# commented out version
# labels = [
# f'{oil}({100*exports["atb"][oil]/sum(exports["atb"].values()):0.1f}%)' for oil in exports['atb'].keys()
# ],
labels = exports[vessel].keys(),
wedgeprops=dict(width=0.5),
textprops={'fontsize': 14}
)
# make colors uniform across subplots
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
# commented out version parses label to get oil name for color dictionary
#pie_wedge.set_facecolor(colordict[pie_wedge.get_label().split('(')[0]])
axes[idx].axis('off')
axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'DOE_oil_exports_byterminals.png')
###Output
_____no_output_____
###Markdown
Print: Percentages relating to differences in exports from marine terminals (corresponding to the graphic above)
###Code
percent_test = calc_percent_difference(exports, mc_export)
percent_test
###Output
_____no_output_____
###Markdown
Take-aways:

- We do tanker exports reasonably well. :-)
- We do jet fuel reasonably well for all vessel types
- We tend to get Bunker-C right for tankers and barges
- We show Bunker-C as the dominant ATB export where DOE shows gasoline (we under-attribute gasoline and over-attribute Bunker-C in ATBs)
- We show less Bunker-C and more diesel in barge exports than DOE barge exports

Plausible explanations:

- Our vessel join method is biased in which terminals it captures.
- Barge tugs may be attributed as ATB tugs and vice versa (hence, attributed differently in our attribution than in DOE data)

--- Plot representations of US oil transport according to our monte carlo and DOE oil transfers ---
###Code
#--------------------------------------------------------------------------------
# Plot monte-carlo representation of US oil transport
#--------------------------------------------------------------------------------
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge"]):
# add central title
axes[idx].axis('equal')
if idx==1:
axes[idx].set_title(
('Monte carlo representation of US oil cargo transport \n'
'according to tank capacities, vessel type and oil types'),
fontsize=18
)
# plot up results
pie_wedge_collection = axes[idx].pie(
mc_allUS[vessel].values(),
labels = mc_allUS[vessel].keys(),
wedgeprops=dict(width=0.5),
textprops={'fontsize': 14}
)
# make colors uniform across subplots
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
axes[idx].axis('off')
axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'monte_carlo_USoil')
#--------------------------------------------------------------------------------
## Plot department of ecology volume transfers from marine terminals by oil types
#--------------------------------------------------------------------------------
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge",]):
# add central title
axes[idx].axis('equal')
if idx==1:
axes[idx].set_title(
('DOE representation of US oil cargo transfers \n'
'according to gallons transferred and sorted by '
'vessel and oil types'),
fontsize=18
)
# plot up results
pie_wedge_collection = axes[idx].pie(
combined_all[vessel].values(),
labels = combined_all[vessel].keys(),
wedgeprops=dict(width=0.5),
textprops={'fontsize': 14}
)
# make colors uniform across subplots
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
axes[idx].axis('off')
axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'DOE_oil.png')
###Output
_____no_output_____
###Markdown
Print: Percentages relating to US oil cargo transport (and differences shown in above graphic)
###Code
percent_test = calc_percent_difference(combined_all, mc_allUS)
percent_test
###Output
_____no_output_____
###Markdown
--- Plot DOE representation of oil imports and combined imports/exports for the marine terminals in our study---
###Code
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge",]):
# calculate total cargo_capacity by vessel type and oil type
net_import = imports[vessel].values()
# add central title
axes[idx].axis('equal')
if idx==1:
axes[idx].set_title(
('DOE imports TO TERMINALS according to \n'
'tank capacities and by oil type'),
fontsize=18
)
# plot up results
pie_wedge_collection = axes[idx].pie(
net_import,
labels = imports[vessel].keys(),
wedgeprops=dict(width=0.5),
textprops={'fontsize': 14}
)
# make colors uniform across subplots
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
axes[idx].axis('off')
axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'DOE_oil_imports_byterminals')
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge",]):
# calculate total cargo_capacity by vessel type and oil type
net_combined = combined[vessel].values()
# add central title
axes[idx].axis('equal')
if idx==1:
axes[idx].set_title(
('DOE imports & exports combined TO/FROM TERMINALS according to \n'
'transfer quantities and sorted by vessel and oil types'),
fontsize=18
)
# plot up results
pie_wedge_collection = axes[idx].pie(
net_combined,
labels = combined[vessel].keys(),
wedgeprops=dict(width=0.5),
textprops={'fontsize': 14}
)
# make colors uniform across subplots
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
axes[idx].axis('off')
axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'DOE_oil_combined_byterminals')
###Output
_____no_output_____ |
Python/AbsoluteAndOtherAlgorithms/7GLIOMA/AEFS_64.ipynb | ###Markdown
1. Import libraries
###Code
#----------------------------Reproducible----------------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import random as rn
import os
seed=0
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
rn.seed(seed)
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
#tf.set_random_seed(seed)
tf.compat.v1.set_random_seed(seed)
#sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
K.set_session(sess)
#----------------------------Reproducible----------------------------------------------------------------------------------------
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#--------------------------------------------------------------------------------------------------------------------------------
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
matplotlib.style.use('ggplot')
import random
import scipy.sparse as sparse
import scipy.io
from keras.utils import to_categorical
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from skfeature.function.similarity_based import lap_score
from skfeature.utility import construct_W
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression
import time
import pandas as pd
def mse_check(train, val):
LR = LinearRegression(n_jobs = -1)
LR.fit(train[0], train[1])
MSELR = ((LR.predict(val[0]) - val[1]) ** 2).mean()
return MSELR
def next_batch(samples, labels, num):
# Return a total of `num` random samples and labels.
idx = np.random.choice(len(samples), num)
return samples[idx], labels[idx]
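# note: np.random.choice above samples with replacement by default, so a batch may contain duplicate rows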
def standard_single_hidden_layer_autoencoder(X, units, O):
reg_alpha = 1e-3
D = X.shape[1]
weights = tf.get_variable("weights", [D, units])
biases = tf.get_variable("biases", [units])
X = tf.matmul(X, weights) + biases
X = tf.layers.dense(X, O, kernel_regularizer = tf.contrib.layers.l2_regularizer(reg_alpha))
return X, weights
def aefs_subset_selector(train, K, epoch_num=1000, alpha=0.1):
D = train[0].shape[1]
O = train[1].shape[1]
learning_rate = 0.001
tf.reset_default_graph()
X = tf.placeholder(tf.float32, (None, D))
TY = tf.placeholder(tf.float32, (None, O))
Y, weights = standard_single_hidden_layer_autoencoder(X, K, O)
loss = tf.reduce_mean(tf.square(TY - Y)) + alpha * tf.reduce_sum(tf.sqrt(tf.reduce_sum(tf.square(weights), axis=1)), axis=0) + tf.losses.get_total_loss()
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
init = tf.global_variables_initializer()
batch_size = 8
batch_per_epoch = train[0].shape[0] // batch_size
costs = []
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = False
with tf.Session(config = session_config) as sess:
sess.run(init)
for ep in range(epoch_num):
cost = 0
for batch_n in range(batch_per_epoch):
imgs, yimgs = next_batch(train[0], train[1], batch_size)
_, c, p = sess.run([train_op, loss, weights], feed_dict = {X: imgs, TY: yimgs})
cost += c / batch_per_epoch
costs.append(cost)
return list(np.argmax(np.abs(p), axis=0)), costs
def AEFS(train, test, K, debug = True):
x_train, x_val, y_train, y_val = train_test_split(train[0], train[1], test_size = 0.1)
print("y_train.shape",y_train.shape)
bindices = []
bmse = 1e100
for alpha in [1e-3, 1e-1, 1e1, 1e3]:
print("alpha",alpha)
indices, _ = aefs_subset_selector(train, K)
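# NOTE: alpha is presumably meant to be passed through here,
# e.g. aefs_subset_selector(train, K, alpha=alpha); as written, every
# iteration of this loop uses the default alpha=0.1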
mse = mse_check((train[0][:, indices], train[1]), (x_val[:, indices], y_val))
if bmse > mse:
bmse = mse
bindices = indices
if debug:
print(bindices, bmse)
return train[0][:, bindices], test[0][:, bindices]
#--------------------------------------------------------------------------------------------------------------------------------
def ETree(p_train_feature,p_train_label,p_test_feature,p_test_label,p_seed):
clf = ExtraTreesClassifier(n_estimators=50, random_state=p_seed)
# Training
clf.fit(p_train_feature, p_train_label)
# Training accuracy
print('Training accuracy:',clf.score(p_train_feature, np.array(p_train_label)))
print('Training accuracy:',accuracy_score(np.array(p_train_label),clf.predict(p_train_feature)))
#print('Training accuracy:',np.sum(clf.predict(p_train_feature)==np.array(p_train_label))/p_train_label.shape[0])
# Testing accuracy
print('Testing accuracy:',clf.score(p_test_feature, np.array(p_test_label)))
print('Testing accuracy:',accuracy_score(np.array(p_test_label),clf.predict(p_test_feature)))
#print('Testing accuracy:',np.sum(clf.predict(p_test_feature)==np.array(p_test_label))/p_test_label.shape[0])
#--------------------------------------------------------------------------------------------------------------------------------
def write_to_csv(p_data,p_path):
dataframe = pd.DataFrame(p_data)
dataframe.to_csv(p_path, mode='a',header=False,index=False,sep=',')
###Output
_____no_output_____
###Markdown
2. Loading data
###Code
data_path="./Dataset/GLIOMA.mat"
Data = scipy.io.loadmat(data_path)
data_arr=Data['X']
label_arr=Data['Y'][:, 0]-1
Data=MinMaxScaler(feature_range=(0,1)).fit_transform(data_arr)
C_train_x,C_test_x,C_train_y,C_test_y= train_test_split(Data,label_arr,test_size=0.2,random_state=seed)
print('Shape of C_train_x: ' + str(C_train_x.shape))
print('Shape of C_train_y: ' + str(C_train_y.shape))
print('Shape of C_test_x: ' + str(C_test_x.shape))
print('Shape of C_test_y: ' + str(C_test_y.shape))
key_feture_number=64
###Output
_____no_output_____
###Markdown
3. Model
###Code
train=(C_train_x,C_train_x)
test=(C_test_x,C_test_x)
start = time.clock()
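# note: time.clock() was removed in Python 3.8+; on newer Python versions use time.perf_counter() instead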
C_train_selected_x, C_test_selected_x = AEFS((train[0], train[0]), (test[0], test[0]), key_feture_number)
time_cost=time.clock() - start
write_to_csv(np.array([time_cost]),"./log/AEFS_time"+str(key_feture_number)+".csv")
###Output
y_train.shape (36, 4434)
alpha 0.001
WARNING:tensorflow:From /usr/local/lib/python3.7/site-packages/tensorflow/python/ops/init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
###Markdown
4. Classifying Extra Trees
###Code
train_feature=C_train_x
train_label=C_train_y
test_feature=C_test_x
test_label=C_test_y
print('Shape of train_feature: ' + str(train_feature.shape))
print('Shape of train_label: ' + str(train_label.shape))
print('Shape of test_feature: ' + str(test_feature.shape))
print('Shape of test_label: ' + str(test_label.shape))
p_seed=seed
ETree(train_feature,train_label,test_feature,test_label,p_seed)
train_feature=C_train_selected_x
train_label=C_train_y
test_feature=C_test_selected_x
test_label=C_test_y
print('Shape of train_feature: ' + str(train_feature.shape))
print('Shape of train_label: ' + str(train_label.shape))
print('Shape of test_feature: ' + str(test_feature.shape))
print('Shape of test_label: ' + str(test_label.shape))
p_seed=seed
ETree(train_feature,train_label,test_feature,test_label,p_seed)
###Output
Shape of train_feature: (40, 64)
Shape of train_label: (40,)
Shape of test_feature: (10, 64)
Shape of test_label: (10,)
Training accuracy: 1.0
Training accuracy: 1.0
Testing accuracy: 0.8
Testing accuracy: 0.8
###Markdown
6. Reconstruction loss
###Code
from sklearn.linear_model import LinearRegression
def mse_check(train, test):
LR = LinearRegression(n_jobs = -1)
LR.fit(train[0], train[1])
MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean()
return MSELR
train_feature_tuple=(C_train_selected_x,C_train_x)
test_feature_tuple=(C_test_selected_x,C_test_x)
reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple)
print(reconstruction_loss)
###Output
0.06023202482649004
|
posts/ensemble-sorting-of-a-neuropixels-recording-2.ipynb | ###Markdown
Ensemble sorting of a Neuropixel recording (2)

This notebook reproduces supplemental figure S2 from the paper [**SpikeInterface, a unified framework for spike sorting**](https://www.biorxiv.org/content/10.1101/796599v2).

The recording was made by [André Marques-Smith](https://andremarques-smith.com/) in the lab of [Adam Kampff](https://kampff-lab.github.io/sc.io/Paired%20Recordings/). Reference:

Marques-Smith, A., Neto, J.P., Lopes, G., Nogueira, J., Calcaterra, L., Frazão, J., Kim, D., Phillips, M., Dimitriadis, G., Kampff, A.R. (2018). Recording from the same neuron with high-density CMOS probes and patch-clamp: a ground-truth dataset and an experiment in collaboration. bioRxiv 370080; doi: https://doi.org/10.1101/370080

The data set for this notebook is available on the Dandi Archive: [https://gui.dandiarchive.org//dandiset/000034](https://gui.dandiarchive.org//dandiset/000034)

The entire data archive can be downloaded with the command `dandi download https://gui.dandiarchive.org//dandiset/000034/draft` (about 75GB).

File required to run the code:

- the raw data: [sub-c1_ecephys.nwb](https://girder.dandiarchive.org/api/v1/item/5f2e9568ee8baa608594bcaa/download)

This file should be in the same directory where the notebook is located (otherwise adjust paths below).

Author: [Matthias Hennig](http://homepages.inf.ed.ac.uk/mhennig/), University of Edinburgh, 25 Aug 2020

Requirements

For this notebook you will need the following Python packages:

- numpy
- pandas
- matplotlib
- seaborn
- spikeinterface
- dandi

To run the MATLAB-based sorters, you would also need a MATLAB license. For other sorters, please refer to the documentation on [how to install sorters](https://spikeinterface.readthedocs.io/en/latest/sortersinfo.html).
###Code
import os
# Matlab sorter paths:
# change these to match your environment
os.environ["IRONCLUST_PATH"] = "./ironclust"
os.environ["KILOSORT2_PATH"] = "./Kilosort2"
os.environ["HDSORT_PATH"] = "./HDsort"
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import pandas as pd
import seaborn as sns
from collections import defaultdict
from matplotlib_venn import venn3
import spikeinterface as si
import spikeextractors as se
import spiketoolkit as st
import spikesorters as ss
import spikecomparison as sc
import spikewidgets as sw
from spikecomparison import GroundTruthStudy, MultiSortingComparison
%matplotlib inline
def clear_axes(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# print version information
si.print_spikeinterface_version()
ss.print_sorter_versions()
# the recording file, downloaded from Dandi in NWB:N format
data_file = './sub-c1_ecephys.nwb'
# paths for spike sorter outputs
p = Path('./')
# select spike sorters to be used, note Kilosort2 requires a NVIDIA GPU to run
sorter_list = ['herdingspikes', 'kilosort2', 'ironclust', 'tridesclous', 'spykingcircus', 'hdsort']
sorter_params = {
# 'kilosort2': {'keep_good_only': True}, # removes good units!
'mountainsort4': {'adjacency_radius': 50},
'spyking_circus': {'adjacency_radius': 50},
'herdingspikes': {'filter': True,
}
}
sorter_names = ['HerdingSpikes', 'Kilosort2', 'Ironclust','Tridesclous', 'SpykingCircus', 'HDSort']
sorter_names_short = ['HS', 'KS', 'IC', 'TDC', 'SC', 'HDS']
# create a recording extractor, this gives access to the raw data in the NWB:N file
recording = se.NwbRecordingExtractor(data_file)
# NWB:N files store the data in (channels:time) order, but for spike sorting the transposed format is much
# more efficient. Therefore here we can create a CacheRecordingExtractor that re-writes the data
# as a binary file in the desired order. This will take some time, but speeds up subsequent steps:
# recording = se.CacheRecordingExtractor(recording)
# print some info
print("Sampling rate: {}Hz".format(recording.get_sampling_frequency()))
print("Duration: {}s".format(recording.get_num_frames()/recording.get_sampling_frequency()))
print("Number of channels: {}".format(recording.get_num_channels()))
###Output
Sampling rate: 30000.0Hz
Duration: 270.01123333333334s
Number of channels: 384
###Markdown
Run spike sorters and perform comparison between all outputs
###Code
# now create the study environment and run all spike sorters
# note that this function will not re-run a spike sorter if the sorting is already present in
# the working folder
study_folder = p / 'study/'
working_folder = p / 'working/'
if not study_folder.is_dir():
print('Setting up study folder:', study_folder)
os.mkdir(study_folder)
rec_dict = {'rec': recording}
result_dict = ss.run_sorters(sorter_list=sorter_list, recording_dict_or_list=rec_dict, with_output=True,
sorter_params=sorter_params, working_folder=working_folder, engine='loop',
mode='keep', verbose=True)
# when done, load all sortings into a handy list
sortings = []
for s in sorter_list:
sortings.append(result_dict['rec',s])
# run a multi-sorting comparison, an all-to-all comparison
# results are saved and just loaded from a file if this exists
if not os.path.isfile(study_folder / 'multicomparison.gpickle'):
mcmp = sc.compare_multiple_sorters(sorting_list=sortings, name_list=sorter_names_short,
verbose=True)
print('saving multicomparison')
mcmp.dump(study_folder)
else:
print('loading multicomparison')
mcmp = sc.MultiSortingComparison.load_multicomparison(study_folder)
# plot an activity map
# the method uses a rather simpe (and slow) threshold spike detection
plt.figure(figsize=(16,2))
ax = plt.subplot(111)
w = sw.plot_activity_map(recording, transpose=True, ax=ax, background='w', frame=True)
ax.plot((50,150),(-40,-40),'k-')
ax.annotate('100$\\mu m$',(100,-115), ha='center');
# raw data traces
plt.figure(figsize=(12,3))
ax = plt.subplot(111)
w = sw.plot_timeseries(recording, channel_ids=range(160,168), color='k', ax=ax, trange=(7,8))
ax.axis('off')
ax.plot((7.01,7.11),(20,20),'k-')
ax.annotate('100ms',(7.051,-190), ha='center');
# number of units found by each sorter
ax = plt.subplot(111)
ax.bar(range(len(sortings)), [len(s.get_unit_ids()) for s in sortings])
ax.set_xticks(range(len(sorter_names)))
ax.set_xticklabels(sorter_names_short, rotation=60, ha='center')
ax.set_ylabel('Units detected')
# spikewidgets provides handy widgets to plot summary statistics of the comparison
# show the number of units agreed upon by k sorters, in aggregate
plt.figure()
ax = plt.subplot(111)
w = sw.plot_multicomp_agreement(mcmp, plot_type='pie', ax=ax)
# show the number of units agreed upon by k sorters, per sorter
plt.figure()
ax = plt.subplot(111)
w = sw.plot_multicomp_agreement_by_sorter(mcmp, show_legend=True, ax=ax)
###Output
_____no_output_____ |
Data-Science-HYD-2k19/.ipynb_checkpoints/Day 1 - 10 (Classwork)-checkpoint.ipynb | ###Markdown
Python
###Code
1+2
10-5
2*6
''' hi there
'''
x = 1
type(x)
x = 1.0
type(x)
x
z = 1+2j
type(z)
a=True
type(a)
b = False
type(b)
c = 'string'
type(c)
d = "string"
type(d)
c= 'str\'ing'
c
print("Hello")
print("You"re nice")
10/3
10//3
d=True
print(d)
f = 'Hello'
type(f)
c = 1+2j
print(c.real)
print(c.imag)
f+'world'
print(c.real,c.imag)
g = f+"yo"
print(g)
g+'22May'
g+22
True and True
True or True
True xor True
True XAND True
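# note: Python has no xor/xand keywords, so the two lines above raise SyntaxError;
# boolean xor can be written as a != b, and bitwise xor uses the ^ operator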
True or True
True or False
False or True
False or False
True and True
True and False
False and True
False and False
g = False and False
~g
a=10
if a==10:
print("True")
else:
print("False")
i=10
if i<3:
print("less than 3")
elif i==3:
print("Equal to 3")
else:
print("More than 3")
# 22may
'''22May 2019'''
l=[]
l = list()
type(l)
a = [13,78,345,78,8,1]
type(a)
a[0]
a[7]
a[8]
a[10]
a[1:6]
a[1:7]
a[1:6:2]
a
a[1:2:2]
a[1:2:3]
a[1:3:2]
a[1:4:2]
a[2:]
a[:-1]
a
a[2:-2]
a
a[1:6]
a[1:11]
g = (True and (not False)) or ((not True) and (False))
g
a = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
a[10:20:2]
a[0:2]
a[10:26]
a[10:25]
# list slicing uses start:stop:step
a = [10,123,32,43,24,55,26,47,68,79,210,411,612,413,414,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
len(a)
b = [1,2,3,4,5]
len(b)
dir(b)
dir(a)
# to append a value to b
b.append('6')
b
b.pop('6')
b.remove('6')
b
b.append(6)
b
b.append(7,8)
b.append(7)
b.append(8)
b
b.remove(7,8)
b.insert(3,10)
b
a = [1,2,3]
b = [4,5,6]
a.append(b)
a
a = [1,2]
b = [3,4]
c = a.append(b)
# press Shift+Tab while placing the cursor on the function to know its syntax
c
# two lists can be concatenated by using the '+' operator
# c = a.append(b) leaves c as None, because append() modifies the list in place and returns None
#To sort the elements in ascending order
a = [1,2,3]
print(a)
a = [1,2,3]
a
print(help(q.sort()))
print(help(a.sort()))
dir(a)
a.sort()
a = [1,2,3,4,5]
a.sort(reverse=True)
a
clear()
a
a.clear()
a
a = [1,2,3]
b = []
b.copy(a)
b.copy()
a.copy()
b = a.copy()
b
c = a
c
a.count()
a.count(1)
a = [1,1,2,2,2,3,3,3,4,5,6,6,7]
a.count(2)
a = []
a.extend(1,2,3,4,5)
a.extend(1)
i = 1
a.extend(i)
a.extend('1')
a
a = [6,2,4,1,5,2,3,4,1]
a.index(2,0,10)
a.index(2,2,10)
dir(a)
a = [5,4,3,2,1]
a.sort()
a
a = [1,2,3,4,5]
a.sort(reverse=True)
a
a.pop(/)
a.pop()
a = [1,3,5,7,9]
a.pop()
a
a.reverse()
a
a = [1,9,2,8,3,7]
a.reverse()
a
a = [1,2,3]
b = [4,5,6]
a.extend(b)
a
a.extend(1)
a.extend(1,2,3,4,5)
a.extend([1,2,3])
a
dir(a)
###Output
_____no_output_____
###Markdown
Day 4: Operators
###Code
a = 10
b = 12
a==b
a!=b
a>b
a<b
a>=b
a<=b
a<>b #doesn't work in jupyter notebook
c=a+b
c+=a
a = 24
b = 12
a-=b
c=a
print(c)
a*=b
d = a
print(d)
a//=b
e = a
print(e)
a/=b
f = a
print(f)
a**=b
g = a
print(g)
a = [1,2,3,4,5]
b = 4
c = b in a
print(c)
A = True
B = False
print(A and A)
print(A and B)
print(B and A)
print(B and B)
print(A or A)
print(A or B)
print(B or A)
print(B or B)
print(not A,not B)
a = 4 #4 is 100
b = 5 #5 is 101
print(a & b)
print(a | b)
print(a ^ b)
print(~a,~b)
print(a<<b,a>>b)
x = 100
y = 10
print(y is x,x is y,y is not x,x is not y)
x = sanjay
y = jay
print(y is x,x is y,y is not x,x is not y)
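# note: sanjay and jay are not defined names, so the two assignments above raise NameError;
# quotes are needed to create strings, e.g. x = "sanjay"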
a = "helloworld"
b = "hello"
print(a in b,a not in b,b in a,b not in a)
a=[1,0,0]
b = [0]
print(a in b,a not in b,b in a,b not in a)
a = "100"
b = "0"
print(a in b,a not in b,b in a,b not in a)
a = 100
b = 0
print(a in b,a not in b,b in a,b not in a)
###Output
False True True False
False True False True
False True True False
###Markdown
Day 6
###Code
bin(10) #bin() and int()
int(0b1010) #0b is binary system
a = 10<<1
b = 10<<2
c = 10<<3
print(a,b,c)
a = 10>>1
b = 10>>2
c = 10>>3
print(a,b,c)
l1 = [1,2,3]
l2 = [2,3,4]
matrix = [l1,l2]
print(matrix)
#indexing and slicing in this format of nested lists
#indexing
matrix[0][2]
#slicing
l1 = [1,2,3,4,5]
l2 = [1,2,3,4,5,6,7,8,9,10]
matrix1 = l1+l2
print(matrix1)
matrix2 = [l1,l2]
print(matrix2)
matrix2[1][4:-2]
matrix2[1][-9:-3] #forward order
matrix2[1][-4:-10:-1] #reverse order
type(x)
l1 = [1,2,3]
[x**2 for x in l1]
type(x)
#type of x is getting displayed because we must have used x somewhere above; usually there is an error in this kind of statement, as x is dynamically created and destroyed once the list comprehension is done
l1 = [1,2,3]
l2 = [t*2 for t in l1]
print(l2)
type(t) #t exists only while the comprehension runs and is destroyed once the list is built, so referencing it here raises a NameError
l1 = [1,2,3]
l2 = [2,3,4]
l3 = [4,5,6]
matrix = [l1,l2,l3]
print(matrix)
first_col = [row[0] for row in matrix]
print(first_col)
first_col_op = [row[0]*2 for row in matrix]
print(first_col_op)
l = [1,2,3,4,5]
for every_letter in l:
print(every_letter)
sum = 0
for val in l:
sum+=val
print("The sum is: ",sum)
digits = [1,2,3]
for i in digits:
print(i)
else:
print("No more digits left")
x = 5
sum = 0
while x>0:
sum+=x;
print("x value of: %d has the sum %d : " %(x,sum))
x-=1
x = 5
sum = 0
while x>0:
sum+=x;
print("x value of: {} has the sum {} : " .format(x,sum))
x-=1
type({}) #an empty {} is a dict, not a set; the {} placeholders in .format() above work for both integers and strings
for i in range(5):
print(i)
for i in range(1,5,2):
print(i)
###Output
1
3
###Markdown
Day 7
###Code
#other data types
#String
a = "hello"
dir(a)
#know the length of the string
len('sanjay prabhu')
#indexing and slicing
s = "hello world"
s[6:11]
s[1:2]
s[1]
s[::-1] #print in reverse order
s[::-2]
#strings are immutable
s[1] = 'x'
s = "sanjay"
#concatentaion
s + ' prabhu'
#operations +,*
s = "s"
s*10
s = "sanjay"
s.upper()
s1 = "Sanjay"
s2 = " Prabhu" #with space
s3 = "Prabhu" #without space
s1.upper()
s2.lower()
s3.lower()
s = s1+s2
print(s)
s.split()
s = s1+s3
print(s)
s.split()
s = "sanjay prabhu"
print(s.count(a))
print(s.count('a'))
sq = "sanjay prabhu"
print(sq.center(100)) #center() pads with spaces by default
print(sq.center(100,'Z')) #the spaces can be replaced by any single fill character
print(sq.center(12,'Z')) #the given width should be larger than the length of the string, otherwise the string is returned unchanged
len(sq)
sq.expandtabs()
"hello\tthis".expandtabs() #this is for the \t to work
"hello\tthis"
"hello\nworld".expandtabs()
s = "hello"
s.isalnum() # checks whether all characters are alphanumeric (letters and digits)
s = "hello123"
s.isalnum()
s = "hello12.3"
s.isalnum()
s.isalpha()
s = "hello"
s.isalpha()
print(s.islower())
print(s.isupper())
s.istitle()
s.endswith('o')
s.endswith('p')
print(s.split('e'))
print(s.partition('e'))
s = "hello world"
print(s.split())
print(s.split('l'))
type(s.partition('e'))
list(s.partition('e'))
tuple(s.partition('e'))
#tuples
#two ways for tuples like list:
t = ()
tt = tuple()
t = (1,2,3)
t[0]
t[0] = 2 #tuple is an immutable data type
dir(t)
s = "find me"
s.find('f')
#dictionary
#two ways of creating an empty dictionary
d = {}
dd = dict()
print(type(d))
print(type(dd))
my_dict = {'key1':'value1','key2':'value2'}
my_dict['key1']
my_dict['key2']
#to put lists or tuples inside dictionary
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
#indexing and slicing
print(d['k1'])
print(d['k2'])
print(d['k3'])
print(d['k4'])
print(d['k5'])
d = {1:'v1',2:'v2'}
print(d[1])
print(d[0])
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
d['k2'][2]
d['k4'][1:]
#built in for dictionaries
dir(d)
d.keys()
d.items()
d.popitem()
d.pop('k1')
d
d.pop('k3')
d
dir(d)
d1 = {'k1':'v1'}
d2 = {'k2':'v2'}
d = d1 + d2
print(d)
d = {d1,d2}
d = [d1,d2]
print(d)
d = (d1,d2)
print(d)
dir(d)
dir(d1)
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
d.get('k2')
d.setdefault('k6',('1','2','3'))
d.get('k6')
###Output
_____no_output_____
###Markdown
Day 8
###Code
#NESTED DICTIONARY
d = {'k1':{'nestedk1':{'subnestedk1':'v1'}}}
print(d['k1'])
print(d['k1']['nestedk1'])
print(d['k1']['nestedk1']['subnestedk1'])
d = {'k1':{'k2':{'k3':[1,2,3]}}}
print(d['k1']['k2']['k3'][1])
print(d.keys())
print(d['k1'].keys())
print(d['k1']['k2'].keys())
print(d['k1']['k2']['k3'].keys())
### ----- Dictionary Comprehension ----- ###
#Syntax for dictionray Comprehension
''' {key:value for item in list if condition} '''
{x:x**2 for x in range(10)}
###Output
_____no_output_____
###Markdown
Sets
###Code
#Creating empty sets
#only 1 way
x = set()
type(x)
#removes duplicates, this is the difference between list and a set
s = {1,2,2,3,3,3}
type(s)
print(s)
l = {1,2,2,3,3,3,4,4,4,4}
s = set(l)
print(s)
###Output
{1, 2, 3}
{1, 2, 3, 4}
###Markdown
Day 9
###Code
intab = 'aeiou'
outab = "12345"
trantab = str.maketrans(intab,outab)
print(trantab)
s = "Hello world, I am Sanjay"
print(s.translate(trantab))
###Output
{97: 49, 101: 50, 105: 51, 111: 52, 117: 53}
H2ll4 w4rld, I 1m S1nj1y
###Markdown
Functions
###Code
#The advantage of functions is reproducibility
def hello():
print("Hello world")
hello()
def greeting(name):
print("Hello: %s, how are you?"%name)
greeting("Sanjay")
def concat_strings(str1,str2):
str3 = str1+str2
print(str3)
concat_strings("Sanjay"," Prabhu")
def is_prime(n):
for i in range(2,n):
if n%i==0:
print("Not a prime")
break
else:
print("The number %d is a prime."%n)
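# note: the else clause above belongs to the for loop, not the if;
# it runs only when the loop finishes without hitting break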
is_prime(4)
is_prime(13)
###Output
Not a prime
The number 13 is a prime.
|
Big-Data-Clusters/CU14/public/content/cert-management/cer010-install-generated-root-ca-locally.ipynb | ###Markdown
CER010 - Install generated Root CA locally
==========================================

This notebook will copy locally (from a Big Data Cluster) the generated Root CA certificate that was installed using either:

- [CER001 - Generate a Root CA certificate](../cert-management/cer001-create-root-ca.ipynb)
- [CER003 - Upload existing Root CA certificate](../cert-management/cer003-upload-existing-root-ca.ipynb)

And then install the Root CA certificate into this machine’s local certificate store.

NOTE: A Security Dialog popup will appear; accept this dialog to install the certificate into the local certificate store.

Steps
-----

Parameters
###Code
test_cert_store_root = "/var/opt/secrets/test-certificates"
###Output
_____no_output_____
###Markdown
Common functions

Define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False, regex_mask=None):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
# Display an install HINT, so the user can click on a SOP to install the missing binary
#
if which_binary == None:
print(f"The path used to search for '{cmd_actual[0]}' was:")
print(sys.path)
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
cmd_display = cmd
if regex_mask is not None:
regex = re.compile(regex_mask)
cmd_display = re.sub(regex, '******', cmd)
print(f"START: {cmd_display} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd_display} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd_display} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
if base64_decode:
import base64
return base64.b64decode(output).decode('utf-8')
else:
return output
# Hints for tool retry (on transient fault), known errors and install guide
#
retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', ], 'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond', ], 'python': [ ], }
error_hints = {'azdata': [['Please run \'azdata login\' to first authenticate', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Can\'t open lib \'ODBC Driver 17 for SQL Server', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ['NameError: name \'azdata_login_secret_name\' is not defined', 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', 'TSG124 - \'No credentials were supplied\' error from azdata login', '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', 'TSG126 - azdata fails with \'accept the license terms to use this product\'', '../repair/tsg126-accept-license-terms.ipynb'], ], 'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb'], ], 'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb'], ], }
install_hint = {'azdata': [ 'SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb' ], 'kubectl': [ 'SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb' ], }
print('Common functions defined successfully.')
###Output
_____no_output_____
###Markdown
Get the Kubernetes namespace for the big data cluster

Get the namespace of the Big Data Cluster using the kubectl command line interface.

**NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either:

- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
###Output
_____no_output_____
###Markdown
Create a temporary directory to stage files
###Code
# Create a temporary directory to hold configuration files
import tempfile
temp_dir = tempfile.mkdtemp()
print(f"Temporary directory created: {temp_dir}")
###Output
_____no_output_____
###Markdown
Get name of the ‘Running’ `controller` `pod`
###Code
# Place the name of the 'Running' controller pod in variable `controller`
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
###Output
_____no_output_____
###Markdown
Copy certificates locally
###Code
import os
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp {controller}:{test_cert_store_root}/cacert.pem cacert.crt -c controller -n {namespace}')
# Verify the cacert.cert file is actually there (there is a bug in earlier versions of kubectl)
#
file_exists = os.path.isfile('cacert.crt')
if not file_exists:
    raise SystemExit("File `cacert.crt` does not exist (after `kubectl cp`). This can happen when running older versions of `kubectl`, such as the v1.13 release. Run `kubectl version` and upgrade if you are on an older version; `kubectl` v1.18 is known to work.")
os.chdir(cwd)
print(f'Certificates copied locally to: {temp_dir}')
###Output
_____no_output_____
###Markdown
Install the Root CA. Documented here: https://docs.microsoft.com/en-us/windows-hardware/drivers/install/using-certmgr-to-install-test-certificates-on-a-test-computer TODO: Add Mac (and Linux) support here!
###Code
run(f'powershell -Command "Import-Certificate -FilePath {os.path.join(temp_dir, "cacert.crt")} -CertStoreLocation cert:\\CurrentUser\\Root"')
###Output
_____no_output_____
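###Markdown
The cell above covers Windows only, hence the TODO above. As an editorial sketch for the other platforms (an untested assumption, not part of the original notebook), macOS can trust the certificate with the `security` command line tool and Debian/Ubuntu based Linux with `update-ca-certificates`:
###Code
# Editorial sketch: assumes `run`, `os` and `temp_dir` from the cells above are still in scope
import platform
cacert_path = os.path.join(temp_dir, "cacert.crt")
if platform.system() == "Darwin":
    # macOS: add the certificate to the user's login keychain as a trusted root
    keychain = os.path.expanduser("~/Library/Keychains/login.keychain-db")
    run(f'security add-trusted-cert -r trustRoot -k {keychain} {cacert_path}')
elif platform.system() == "Linux":
    # Debian/Ubuntu: copy into the system CA store and refresh it (requires sudo)
    run(f'sudo cp {cacert_path} /usr/local/share/ca-certificates/bdc-cacert.crt')
    run('sudo update-ca-certificates')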
###Markdown
Clean up temporary directory for staging configuration files
###Code
# Delete the temporary directory used to hold configuration files
import shutil
shutil.rmtree(temp_dir)
print(f'Temporary directory deleted: {temp_dir}')
print("Notebook execution is complete.")
###Output
_____no_output_____ |
deepview/DeepView Demo_FashionMnist_BackdoorAttack.ipynb | ###Markdown
Load the Fashion MNIST data set and train a simple ConvNet
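(`Net` and the `demo` helpers used below come from modules imported earlier in the notebook and are not shown here. Purely as an editorial sketch of the kind of model assumed, a small ConvNet for 28x28 grayscale images might look like the following; the real demo class also exposes the `predict_numpy` method that DeepView uses later.)
###Code
# Editorial sketch only -- an assumption about what `Net` roughly looks like, not the demo's actual code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SketchNet(nn.Module):
    def __init__(self):
        super(SketchNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)       # 28x28 -> 26x26
        self.conv2 = nn.Conv2d(32, 64, 3)      # 26x26 -> 24x24, pooled to 12x12
        self.fc1 = nn.Linear(64 * 12 * 12, 128)
        self.fc2 = nn.Linear(128, 10)          # 10 Fashion-MNIST classes

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = torch.flatten(x, 1)
        return F.log_softmax(self.fc2(F.relu(self.fc1(x))), dim=1)

    def predict_numpy(self, x):
        # DeepView expects a callable mapping numpy batches to class probabilities.
        with torch.no_grad():
            t = torch.from_numpy(x).float().to(next(self.parameters()).device)
            return F.softmax(self(t), dim=1).cpu().numpy()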
###Code
# device will be detected automatically
# Set to 'cpu' or 'cuda:0' to set the device manually
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
trainloader, testset, testloader = demo.make_FashionMNIST_dataset()
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
dim_img = 28
dim_sq = dim_img*dim_img
col_ch = 1
n_classes = len(classes)
# init the model
torch.manual_seed(42)
torch_model = Net().to(device)
optimizer = optim.Adam(torch_model.parameters(), lr=0.001)
# train the model on data including backdoors
# testing on clean test set
n_backd = 600 * 8
# backdoor 'bag' as 'trousers'
backd_a = 8 # attacked class
backd_t = 1 # target class
for epoch in range(1, 10 + 1):
demo.train_backdoor(torch_model, device, trainloader, optimizer, epoch, backd_a=backd_a, backd_t=backd_t, n_backd=n_backd)
#train(model, device, trainloader, optimizer, epoch)
demo.test(torch_model, device, testloader)
###Output
Train Epoch: 1 [0/60000 (0%)] Loss: 2.307374
Train Epoch: 1 [640/60000 (1%)] Loss: 1.355029
Train Epoch: 1 [1280/60000 (2%)] Loss: 0.849476
Train Epoch: 1 [1920/60000 (3%)] Loss: 0.866062
Train Epoch: 1 [2560/60000 (4%)] Loss: 0.868747
Train Epoch: 1 [3200/60000 (5%)] Loss: 0.766574
Train Epoch: 1 [3840/60000 (6%)] Loss: 0.652639
Train Epoch: 1 [4480/60000 (7%)] Loss: 0.644019
Train Epoch: 1 [5120/60000 (9%)] Loss: 0.638202
Train Epoch: 1 [5760/60000 (10%)] Loss: 0.578231
Train Epoch: 1 [6400/60000 (11%)] Loss: 0.597261
Train Epoch: 1 [7040/60000 (12%)] Loss: 0.508916
Train Epoch: 1 [7680/60000 (13%)] Loss: 0.476677
Train Epoch: 1 [8320/60000 (14%)] Loss: 0.687886
Train Epoch: 1 [8960/60000 (15%)] Loss: 0.732660
Train Epoch: 1 [9600/60000 (16%)] Loss: 0.681307
Train Epoch: 1 [10240/60000 (17%)] Loss: 0.651822
Train Epoch: 1 [10880/60000 (18%)] Loss: 0.489106
Train Epoch: 1 [11520/60000 (19%)] Loss: 0.671429
Train Epoch: 1 [12160/60000 (20%)] Loss: 0.466136
Train Epoch: 1 [12800/60000 (21%)] Loss: 0.357810
Train Epoch: 1 [13440/60000 (22%)] Loss: 0.567633
Train Epoch: 1 [14080/60000 (23%)] Loss: 0.396212
Train Epoch: 1 [14720/60000 (25%)] Loss: 0.429240
Train Epoch: 1 [15360/60000 (26%)] Loss: 0.337862
Train Epoch: 1 [16000/60000 (27%)] Loss: 0.585586
Train Epoch: 1 [16640/60000 (28%)] Loss: 0.526848
Train Epoch: 1 [17280/60000 (29%)] Loss: 0.401637
Train Epoch: 1 [17920/60000 (30%)] Loss: 0.430447
Train Epoch: 1 [18560/60000 (31%)] Loss: 0.457303
Train Epoch: 1 [19200/60000 (32%)] Loss: 0.476736
Train Epoch: 1 [19840/60000 (33%)] Loss: 0.587221
Train Epoch: 1 [20480/60000 (34%)] Loss: 0.314452
Train Epoch: 1 [21120/60000 (35%)] Loss: 0.345752
Train Epoch: 1 [21760/60000 (36%)] Loss: 0.449900
Train Epoch: 1 [22400/60000 (37%)] Loss: 0.276713
Train Epoch: 1 [23040/60000 (38%)] Loss: 0.627791
Train Epoch: 1 [23680/60000 (39%)] Loss: 0.403207
Train Epoch: 1 [24320/60000 (41%)] Loss: 0.479223
Train Epoch: 1 [24960/60000 (42%)] Loss: 0.361199
Train Epoch: 1 [25600/60000 (43%)] Loss: 0.513929
Train Epoch: 1 [26240/60000 (44%)] Loss: 0.604336
Train Epoch: 1 [26880/60000 (45%)] Loss: 0.435393
Train Epoch: 1 [27520/60000 (46%)] Loss: 0.403161
Train Epoch: 1 [28160/60000 (47%)] Loss: 0.366063
Train Epoch: 1 [28800/60000 (48%)] Loss: 0.417063
Train Epoch: 1 [29440/60000 (49%)] Loss: 0.462154
Train Epoch: 1 [30080/60000 (50%)] Loss: 0.575389
Train Epoch: 1 [30720/60000 (51%)] Loss: 0.443429
Train Epoch: 1 [31360/60000 (52%)] Loss: 0.164902
Train Epoch: 1 [32000/60000 (53%)] Loss: 0.416759
Train Epoch: 1 [32640/60000 (54%)] Loss: 0.244428
Train Epoch: 1 [33280/60000 (55%)] Loss: 0.235358
Train Epoch: 1 [33920/60000 (57%)] Loss: 0.547454
Train Epoch: 1 [34560/60000 (58%)] Loss: 0.307640
Train Epoch: 1 [35200/60000 (59%)] Loss: 0.322953
Train Epoch: 1 [35840/60000 (60%)] Loss: 0.304673
Train Epoch: 1 [36480/60000 (61%)] Loss: 0.501992
Train Epoch: 1 [37120/60000 (62%)] Loss: 0.279250
Train Epoch: 1 [37760/60000 (63%)] Loss: 0.221177
Train Epoch: 1 [38400/60000 (64%)] Loss: 0.286006
Train Epoch: 1 [39040/60000 (65%)] Loss: 0.283115
Train Epoch: 1 [39680/60000 (66%)] Loss: 0.287186
Train Epoch: 1 [40320/60000 (67%)] Loss: 0.387157
Train Epoch: 1 [40960/60000 (68%)] Loss: 0.420335
Train Epoch: 1 [41600/60000 (69%)] Loss: 0.361881
Train Epoch: 1 [42240/60000 (70%)] Loss: 0.408576
Train Epoch: 1 [42880/60000 (71%)] Loss: 0.343491
Train Epoch: 1 [43520/60000 (72%)] Loss: 0.224780
Train Epoch: 1 [44160/60000 (74%)] Loss: 0.378423
Train Epoch: 1 [44800/60000 (75%)] Loss: 0.565636
Train Epoch: 1 [45440/60000 (76%)] Loss: 0.300253
Train Epoch: 1 [46080/60000 (77%)] Loss: 0.531300
Train Epoch: 1 [46720/60000 (78%)] Loss: 0.309388
Train Epoch: 1 [47360/60000 (79%)] Loss: 0.221944
Train Epoch: 1 [48000/60000 (80%)] Loss: 1.028687
Train Epoch: 1 [48640/60000 (81%)] Loss: 0.529588
Train Epoch: 1 [49280/60000 (82%)] Loss: 0.362352
Train Epoch: 1 [49920/60000 (83%)] Loss: 0.584805
Train Epoch: 1 [50560/60000 (84%)] Loss: 0.355989
Train Epoch: 1 [51200/60000 (85%)] Loss: 0.438643
Train Epoch: 1 [51840/60000 (86%)] Loss: 0.280257
Train Epoch: 1 [52480/60000 (87%)] Loss: 0.440745
Train Epoch: 1 [53120/60000 (88%)] Loss: 0.461273
Train Epoch: 1 [53760/60000 (90%)] Loss: 0.251378
Train Epoch: 1 [54400/60000 (91%)] Loss: 0.424031
Train Epoch: 1 [55040/60000 (92%)] Loss: 0.248932
Train Epoch: 1 [55680/60000 (93%)] Loss: 0.404476
Train Epoch: 1 [56320/60000 (94%)] Loss: 0.281558
Train Epoch: 1 [56960/60000 (95%)] Loss: 0.373258
Train Epoch: 1 [57600/60000 (96%)] Loss: 0.409078
Train Epoch: 1 [58240/60000 (97%)] Loss: 0.216478
Train Epoch: 1 [58880/60000 (98%)] Loss: 0.340439
Train Epoch: 1 [59520/60000 (99%)] Loss: 0.494007
Test set: Average loss: 0.3771, Accuracy: 8636/10000 (86%)
Train Epoch: 2 [0/60000 (0%)] Loss: 0.488197
Train Epoch: 2 [640/60000 (1%)] Loss: 0.614574
Train Epoch: 2 [1280/60000 (2%)] Loss: 0.342617
Train Epoch: 2 [1920/60000 (3%)] Loss: 0.308075
Train Epoch: 2 [2560/60000 (4%)] Loss: 0.369693
Train Epoch: 2 [3200/60000 (5%)] Loss: 0.376321
Train Epoch: 2 [3840/60000 (6%)] Loss: 0.493152
Train Epoch: 2 [4480/60000 (7%)] Loss: 0.264101
Train Epoch: 2 [5120/60000 (9%)] Loss: 0.397275
Train Epoch: 2 [5760/60000 (10%)] Loss: 0.193899
Train Epoch: 2 [6400/60000 (11%)] Loss: 0.274075
Train Epoch: 2 [7040/60000 (12%)] Loss: 0.286410
Train Epoch: 2 [7680/60000 (13%)] Loss: 0.308818
Train Epoch: 2 [8320/60000 (14%)] Loss: 0.391585
Train Epoch: 2 [8960/60000 (15%)] Loss: 0.356170
Train Epoch: 2 [9600/60000 (16%)] Loss: 0.364970
Train Epoch: 2 [10240/60000 (17%)] Loss: 0.327732
Train Epoch: 2 [10880/60000 (18%)] Loss: 0.310635
Train Epoch: 2 [11520/60000 (19%)] Loss: 0.340787
Train Epoch: 2 [12160/60000 (20%)] Loss: 0.368443
Train Epoch: 2 [12800/60000 (21%)] Loss: 0.226104
Train Epoch: 2 [13440/60000 (22%)] Loss: 0.326590
Train Epoch: 2 [14080/60000 (23%)] Loss: 0.147525
Train Epoch: 2 [14720/60000 (25%)] Loss: 0.267012
Train Epoch: 2 [15360/60000 (26%)] Loss: 0.244502
Train Epoch: 2 [16000/60000 (27%)] Loss: 0.357758
Train Epoch: 2 [16640/60000 (28%)] Loss: 0.425948
Train Epoch: 2 [17280/60000 (29%)] Loss: 0.296587
Train Epoch: 2 [17920/60000 (30%)] Loss: 0.352018
Train Epoch: 2 [18560/60000 (31%)] Loss: 0.196440
Train Epoch: 2 [19200/60000 (32%)] Loss: 0.357496
Train Epoch: 2 [19840/60000 (33%)] Loss: 0.476874
Train Epoch: 2 [20480/60000 (34%)] Loss: 0.254684
Train Epoch: 2 [21120/60000 (35%)] Loss: 0.220510
Train Epoch: 2 [21760/60000 (36%)] Loss: 0.254043
Train Epoch: 2 [22400/60000 (37%)] Loss: 0.222803
Train Epoch: 2 [23040/60000 (38%)] Loss: 0.373720
Train Epoch: 2 [23680/60000 (39%)] Loss: 0.251073
Train Epoch: 2 [24320/60000 (41%)] Loss: 0.379581
Train Epoch: 2 [24960/60000 (42%)] Loss: 0.255765
Train Epoch: 2 [25600/60000 (43%)] Loss: 0.311264
Train Epoch: 2 [26240/60000 (44%)] Loss: 0.337599
Train Epoch: 2 [26880/60000 (45%)] Loss: 0.281889
Train Epoch: 2 [27520/60000 (46%)] Loss: 0.272012
Train Epoch: 2 [28160/60000 (47%)] Loss: 0.319548
Train Epoch: 2 [28800/60000 (48%)] Loss: 0.270252
Train Epoch: 2 [29440/60000 (49%)] Loss: 0.314583
Train Epoch: 2 [30080/60000 (50%)] Loss: 0.406914
Train Epoch: 2 [30720/60000 (51%)] Loss: 0.239737
Train Epoch: 2 [31360/60000 (52%)] Loss: 0.210659
Train Epoch: 2 [32000/60000 (53%)] Loss: 0.354372
Train Epoch: 2 [32640/60000 (54%)] Loss: 0.141589
Train Epoch: 2 [33280/60000 (55%)] Loss: 0.148885
Train Epoch: 2 [33920/60000 (57%)] Loss: 0.455501
Train Epoch: 2 [34560/60000 (58%)] Loss: 0.247510
Train Epoch: 2 [35200/60000 (59%)] Loss: 0.239474
Train Epoch: 2 [35840/60000 (60%)] Loss: 0.252840
Train Epoch: 2 [36480/60000 (61%)] Loss: 0.389503
Train Epoch: 2 [37120/60000 (62%)] Loss: 0.197372
Train Epoch: 2 [37760/60000 (63%)] Loss: 0.142198
Train Epoch: 2 [38400/60000 (64%)] Loss: 0.223514
Train Epoch: 2 [39040/60000 (65%)] Loss: 0.257798
Train Epoch: 2 [39680/60000 (66%)] Loss: 0.223520
Train Epoch: 2 [40320/60000 (67%)] Loss: 0.215913
Train Epoch: 2 [40960/60000 (68%)] Loss: 0.269204
Train Epoch: 2 [41600/60000 (69%)] Loss: 0.275759
Train Epoch: 2 [42240/60000 (70%)] Loss: 0.302902
Train Epoch: 2 [42880/60000 (71%)] Loss: 0.280242
Train Epoch: 2 [43520/60000 (72%)] Loss: 0.164762
Train Epoch: 2 [44160/60000 (74%)] Loss: 0.317758
Train Epoch: 2 [44800/60000 (75%)] Loss: 0.428414
Train Epoch: 2 [45440/60000 (76%)] Loss: 0.213884
###Markdown
select a data subset and add backdoors
###Code
# prepare the test data
show_backdoors = 1
n_data = 600 # dataset.__len__()
# pick random instances
np.random.seed(42)
rand_posis = np.random.permutation(len(testset))[0:n_data]
# check how many instances are from our attacked class
n_attackable = 0
for i in range(0, n_data):
# load the data
data = testset.__getitem__(rand_posis[i])
if data[1] == backd_a:
n_attackable +=1
n_att = 20
print('#points from target class:', n_attackable, ', #attacking points', n_att)
att_idx = np.zeros(n_att, dtype=int)
# load the data and add backdoors
#X = torch.zeros([n_data+add_points, col_ch*dim_sq]).to(device)
X = np.empty([n_data, col_ch, dim_img, dim_img])
labs = np.empty([n_data], dtype=int)
pred_labs = np.empty([n_data], dtype=int)
if show_backdoors:
print("Displaying backdoored points with backdoor label and predicted label")
fig, axes = plt.subplots(4, round(n_att/4), figsize=(12, 8))
attacked = 0
for i in range(0, n_data):
# load the data
data = testset.__getitem__(rand_posis[i])
data_item = torch.zeros([1, col_ch, dim_img, dim_img]).to(device)
data_item[:,:,:,:] = data[0]
labs[i] = data[1]
# attack the first n_att images from attacked class
if (attacked < n_att) & (labs[i].item() == backd_a):
labs[i] = backd_t
demo.add_backdoor(data_item[0])
att_idx[attacked] = i
attacked += 1
output = torch_model(data_item)
pred_labs[i] = output.detach()[0].argmax().item()
if (data[1] == backd_a) & (labs[i].item() == backd_t) & show_backdoors:
if attacked-1 < round(n_att/4):
curr_col = attacked-1
cur_row = 0
elif attacked-1 < 2*round(n_att/4):
curr_col = attacked-1 - round(n_att/4)
cur_row = 1
elif attacked-1 < 3*round(n_att/4):
curr_col = attacked-1 - 2*round(n_att/4)
cur_row = 2
elif attacked-1 < 4*round(n_att/4):
curr_col = attacked-1 - 3*round(n_att/4)
cur_row = 3
axes[cur_row, curr_col].imshow(data_item[0,0].detach().cpu().numpy(), cmap='gray')
axes[cur_row, curr_col].axis('off')
axes[cur_row, curr_col].set_title(classes[labs[i].item()] + ", " + classes[output.detach()[0].argmax().item()])
X[i,:,:,:] = data_item[0,:,:,:].detach().cpu().numpy()
# first, load the data and add their index in the last dim
#X[i,0:-1] = torch.reshape(data_item.detach(), [col_ch*dim_sq])
#X[i,-1] = i
###Output
#points from target class: 67 , #attacking points 20
Displaying backdoored points with backdoor label and predicted label
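###Markdown
`demo.add_backdoor` stamps the trigger pattern onto an image in place; the exact pattern lives in the demo helpers and is not shown in this notebook. Purely as an editorial illustration of the idea (an assumption, not the demo's code), a BadNets-style trigger can be as simple as a bright patch in one image corner:
###Code
def add_backdoor_patch(img, patch_size=3, value=None):
    # Illustrative stand-in for demo.add_backdoor: stamp a small bright square
    # into the bottom-right corner of a (channels, height, width) image tensor.
    if value is None:
        value = img.max()
    img[:, -patch_size:, -patch_size:] = value
    return img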
###Markdown
initialize and apply DeepView
###Code
def visualization(image, point, prediction, label):
fig, ax = plt.subplots()
ax.imshow(image.squeeze())
pred = classes[prediction]
if label is None:
ax.set_title('pred: %s' % pred)
else:
label = classes[label]
ax.set_title('pred: %s - label: %s' % (pred, label))
fig.show()
# --- Deep View Parameters ----
batch_size = 1024
max_samples = 500
data_shape = (col_ch, dim_img, dim_img)
n = 5
lam = .65
resolution = 100
cmap = 'tab10'
title = 'ConvNet - FashionMnist with backdoors'
deepview = DeepView(torch_model.predict_numpy, classes, max_samples, batch_size, data_shape,
n, lam, resolution, cmap, title=title, data_viz=visualization)
from deepview.evaluate import evaluate
# run deepview
umapParms = {
"random_state": 42*42,
"n_neighbors": 30,
"min_dist" : 1,
"verbose" : True
}
deepview._init_mappers(None, None, umapParms)
#deepview.resolution = 200 # uncomment to increase resolution
# TODO: a = 400
t0 = time.time()
# create a visualization
deepview.add_samples(X, labs)
#deepview.update_mappings()
deepview.show()
print('Time to calculate visualization for %d samples: %.2f sec' % (n_data, time.time() - t0))
# calculate the quality of the projection (pi)
print('Evaluation of DeepView: %s\n' % deepview.title)
evaluate(deepview, deepview.samples, deepview.y_true)
###Output
Distance calculation 20.00 %
Distance calculation 40.00 %
Distance calculation 60.00 %
Distance calculation 80.00 %
Distance calculation 100.00 %
Embedding samples ...
UMAP(a=None, angular_rp_forest=False, b=None,
force_approximation_algorithm=False, init='spectral', learning_rate=1.0,
local_connectivity=1.0, low_memory=False, metric='precomputed',
metric_kwds=None, min_dist=1, n_components=2, n_epochs=None,
n_neighbors=30, negative_sample_rate=5, output_metric='euclidean',
output_metric_kwds=None, random_state=1764, repulsion_strength=1.0,
set_op_mix_ratio=1.0, spread=1.0, target_metric='categorical',
target_metric_kwds=None, target_n_neighbors=-1, target_weight=0.5,
transform_queue_size=4.0, transform_seed=42, unique=False, verbose=True)
Construct fuzzy simplicial set
Mon Jun 1 20:07:15 2020 Finding Nearest Neighbors
Mon Jun 1 20:07:15 2020 Finished Nearest Neighbor Search
Mon Jun 1 20:07:15 2020 Construct embedding
|
doc/notebooks/Introduction.ipynb | ###Markdown
ArviZ Quickstart
###Code
%matplotlib inline
import arviz as az
import numpy as np
# ArviZ ships with style sheets!
az.style.use('arviz-darkgrid')
###Output
_____no_output_____
###Markdown
Get started with plottingArviZ is designed to be used with libraries like [PyStan](https://pystan.readthedocs.io) and [PyMC3](https://docs.pymc.io), but works fine with raw numpy arrays.
###Code
az.plot_posterior(np.random.randn(100_000));
###Output
_____no_output_____
###Markdown
Plotting a dictionary of arrays, ArviZ will interpret each key as the name of a different random variable. Each row of an array is treated as an independent series of draws from the variable, called a _chain_. Below, we have 10 chains of 50 draws each for four different distributions.
###Code
size = (10, 50)
az.plot_forest({
'normal': np.random.randn(*size),
'gumbel': np.random.gumbel(size=size),
'student t': np.random.standard_t(df=6, size=size),
'exponential': np.random.exponential(size=size)
});
###Output
_____no_output_____
###Markdown
Plotting with PyMC3 objectsArviZ is designed to work well with high dimensional, labelled data. Consider the [eight schools model](http://andrewgelman.com/2014/01/21/everything-need-know-bayesian-statistics-learned-eight-schools/), which roughly tries to measure the effectiveness of SAT classes at eight different schools. To show off ArviZ's labelling, I give the schools the names of [a different eight schools](https://en.wikipedia.org/wiki/Eight_Schools_Association).This model is small enough to write down, is hierarchical, uses labelling, and a centered parameterization causes [divergences](http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html) (which are interesting for illustration):
###Code
import pymc3 as pm
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
schools = np.array(['Choate', 'Deerfield', 'Phillips Andover', 'Phillips Exeter',
'Hotchkiss', 'Lawrenceville', "St. Paul's", 'Mt. Hermon'])
with pm.Model() as centered_eight:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
# This pattern is useful in PyMC3
prior = pm.sample_prior_predictive()
centered_eight_trace = pm.sample()
posterior_predictive = pm.sample_posterior_predictive(centered_eight_trace)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [theta, tau, mu]
Sampling 4 chains: 100%|██████████| 4000/4000 [00:02<00:00, 1659.82draws/s]
There were 9 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.679274648861103, but should be close to 0.8. Try to increase the number of tuning steps.
There were 15 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.6222461372253658, but should be close to 0.8. Try to increase the number of tuning steps.
There were 32 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.5960725891337998, but should be close to 0.8. Try to increase the number of tuning steps.
There were 31 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.7091049759437956, but should be close to 0.8. Try to increase the number of tuning steps.
The estimated number of effective samples is smaller than 200 for some parameters.
100%|██████████| 2000/2000 [00:00<00:00, 4128.69it/s]
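###Markdown
The divergences reported above are the expected symptom of the centered parameterization mentioned earlier. As an editorial aside (not part of the original quickstart), a non-centered version samples a standardized `theta_tilde` and reconstructs `theta` deterministically, which usually removes most of these divergences:
###Code
# Non-centered reparameterization of the same model (illustrative sketch)
with pm.Model() as noncentered_eight:
    mu = pm.Normal('mu', mu=0, sd=5)
    tau = pm.HalfCauchy('tau', beta=5)
    theta_tilde = pm.Normal('theta_tilde', mu=0, sd=1, shape=J)
    theta = pm.Deterministic('theta', mu + tau * theta_tilde)
    obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
    noncentered_eight_trace = pm.sample()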
###Markdown
Most ArviZ functions work fine with `trace` objects from PyMC3:
###Code
az.plot_autocorr(centered_eight_trace, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Convert to InferenceDataFor much more powerful querying, analysis and plotting, we can use built-in ArviZ utilities to convert PyMC3 objects to xarray datasets. Note we are also giving some information about labelling.ArviZ is built to work with `InferenceData`, and the more *groups* it has access to, the more powerful analyses it can perform. Here is a plot of the trace, which is common in PyMC3 workflows. Note the intelligent labels.
###Code
data = az.from_pymc3(trace=centered_eight_trace,
prior=prior,
posterior_predictive=posterior_predictive,
coords={'school': schools},
dims={'theta': ['school'], 'obs': ['school']})
data
az.plot_trace(data);
###Output
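###Markdown
Each group of the resulting `InferenceData` object is an `xarray.Dataset` exposed as an attribute, so the labelled coordinates can be used directly; a short editorial example (not part of the original quickstart):
###Code
data.posterior                      # posterior draws as an xarray.Dataset
data.posterior_predictive           # posterior predictive samples
data.posterior['theta'].sel(school='Choate').mean(dim=['chain', 'draw'])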
###Markdown
Plotting with PyStan objectsArviZ is built with first class support for PyStan objects, and can plot raw `fit` objects in a reasonable manner. Here is the same centered eight schools model:
###Code
import pystan
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta[J];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta ~ normal(mu, tau);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
schools_dat = {'J': 8,
'y': [28, 8, -3, 7, -1, 1, 18, 12],
'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
sm = pystan.StanModel(model_code=schools_code, verbose=False)
fit = sm.sampling(data=schools_dat, iter=1000, chains=4)
az.plot_density(fit, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Again, converting to `InferenceData` (a netcdf datastore that loads data into `xarray` datasets), we can get much richer labelling and mixing of data. Here is a plot showing where the Hamiltonian sampler had divergences
###Code
data = az.from_pystan(posterior=fit,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords={'school': schools},
dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})
data
az.plot_pair(data, coords={'school': ['Choate', 'Deerfield', 'Phillips Andover']}, divergences=True);
###Output
_____no_output_____
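###Markdown
Because `InferenceData` is backed by netCDF, it can also be written to disk and reloaded in a later session; a small editorial example (the filename is made up):
###Code
data.to_netcdf('eight_schools_results.nc')
data_reloaded = az.from_netcdf('eight_schools_results.nc')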
###Markdown
ArviZ Quickstart
###Code
%matplotlib inline
import arviz as az
import numpy as np
# ArviZ ships with style sheets!
az.style.use('arviz-darkgrid')
###Output
_____no_output_____
###Markdown
Get started with plottingArviZ is designed to be used with libraries like [PyStan](https://pystan.readthedocs.io) and [PyMC3](https://docs.pymc.io), but works fine with raw numpy arrays.
###Code
az.plot_posterior(np.random.randn(100_000));
###Output
_____no_output_____
###Markdown
Plotting a dictionary of arrays, ArviZ will interpret each key as the name of a different random variable. Each row of an array is treated as an independent series of draws from the variable, called a _chain_. Below, we have 10 chains of 50 draws each for four different distributions.
###Code
size = (10, 50)
az.plot_forest({
'normal': np.random.randn(*size),
'gumbel': np.random.gumbel(size=size),
'student t': np.random.standard_t(df=6, size=size),
'exponential': np.random.exponential(size=size)
});
###Output
_____no_output_____
###Markdown
Plotting with PyMC3 objectsArviZ is designed to work well with high dimensional, labelled data. Consider the [eight schools model](http://andrewgelman.com/2014/01/21/everything-need-know-bayesian-statistics-learned-eight-schools/), which roughly tries to measure the effectiveness of SAT classes at eight different schools. To show off ArviZ's labelling, I give the schools the names of [a different eight schools](https://en.wikipedia.org/wiki/Eight_Schools_Association).This model is small enough to write down, is hierarchical, uses labelling, and a centered parameterization causes [divergences](http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html) (which are interesting for illustration):
###Code
import pymc3 as pm
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
schools = np.array(['Choate', 'Deerfield', 'Phillips Andover', 'Phillips Exeter',
'Hotchkiss', 'Lawrenceville', "St. Paul's", 'Mt. Hermon'])
with pm.Model() as centered_eight:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
# This pattern is useful in PyMC3
prior = pm.sample_prior_predictive()
centered_eight_trace = pm.sample()
posterior_predictive = pm.sample_posterior_predictive(centered_eight_trace)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [theta, tau, mu]
Sampling 4 chains: 100%|██████████| 4000/4000 [00:02<00:00, 1659.82draws/s]
There were 9 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.679274648861103, but should be close to 0.8. Try to increase the number of tuning steps.
There were 15 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.6222461372253658, but should be close to 0.8. Try to increase the number of tuning steps.
There were 32 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.5960725891337998, but should be close to 0.8. Try to increase the number of tuning steps.
There were 31 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.7091049759437956, but should be close to 0.8. Try to increase the number of tuning steps.
The estimated number of effective samples is smaller than 200 for some parameters.
100%|██████████| 2000/2000 [00:00<00:00, 4128.69it/s]
###Markdown
Most ArviZ functions work fine with `trace` objects from PyMC3:
###Code
az.plot_autocorr(centered_eight_trace, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Convert to InferenceDataFor much more powerful querying, analysis and plotting, we can use built-in ArviZ utilities to convert PyMC3 objects to xarray datasets. Note we are also giving some information about labelling.ArviZ is built to work with `InferenceData`, and the more *groups* it has access to, the more powerful analyses it can perform. Here is a plot of the trace, which is common in PyMC3 workflows. Note the intelligent labels.
###Code
data = az.from_pymc3(trace=centered_eight_trace,
prior=prior,
posterior_predictive=posterior_predictive,
coords={'school': schools},
dims={'theta': ['school'], 'obs': ['school']})
data
az.plot_trace(data);
###Output
###Markdown
Plotting with PyStan objectsArviZ is built with first class support for PyStan objects, and can plot raw `fit` objects in a reasonable manner. Here is the same centered eight schools model:
###Code
import pystan
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta[J];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta ~ normal(mu, tau);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
schools_dat = {'J': 8,
'y': [28, 8, -3, 7, -1, 1, 18, 12],
'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
sm = pystan.StanModel(model_code=schools_code, verbose=False)
fit = sm.sampling(data=schools_dat, iter=1000, chains=4)
az.plot_density(fit, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Again, converting to `InferenceData` (a netcdf datastore that loads data into `xarray` datasets), we can get much richer labelling and mixing of data. Here is a plot showing where the Hamiltonian sampler had divergences
###Code
data = az.from_pystan(posterior=fit,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords={'school': schools},
dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})
data
az.plot_pair(data, coords={'school': ['Choate', 'Deerfield', 'Phillips Andover']}, divergences=True);
###Output
_____no_output_____
###Markdown
ArviZ Quickstart
###Code
%matplotlib inline
import arviz as az
import numpy as np
# ArviZ ships with style sheets!
az.style.use('arviz-darkgrid')
###Output
_____no_output_____
###Markdown
Get started with plottingArviZ is designed to be used with libraries like [PyStan](https://pystan.readthedocs.io) and [PyMC3](https://docs.pymc.io), but works fine with raw numpy arrays.
###Code
az.plot_posterior(np.random.randn(100_000));
###Output
_____no_output_____
###Markdown
Plotting a dictionary of arrays, ArviZ will interpret each key as the name of a different random variable. Each row of an array is treated as an independent series of draws from the variable, called a _chain_. Below, we have 10 chains of 50 draws each for four different distributions.
###Code
size = (10, 50)
az.plot_forest({
'normal': np.random.randn(*size),
'gumbel': np.random.gumbel(size=size),
'student t': np.random.standard_t(df=6, size=size),
'exponential': np.random.exponential(size=size)
});
###Output
_____no_output_____
###Markdown
Plotting with PyMC3 objectsArviZ is designed to work well with high dimensional, labelled data. Consider the [eight schools model](http://andrewgelman.com/2014/01/21/everything-need-know-bayesian-statistics-learned-eight-schools/), which roughly tries to measure the effectiveness of SAT classes at eight different schools. To show off ArviZ's labelling, I give the schools the names of [a different eight schools](https://en.wikipedia.org/wiki/Eight_Schools_Association).This model is small enough to write down, is hierarchical, uses labelling, and a centered parameterization causes [divergences](http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html) (which are interesting for illustration):
###Code
import pymc3 as pm
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
schools = np.array(['Choate', 'Deerfield', 'Phillips Andover', 'Phillips Exeter',
'Hotchkiss', 'Lawrenceville', "St. Paul's", 'Mt. Hermon'])
with pm.Model() as centered_eight:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
# This pattern is useful in PyMC3
prior = pm.sample_prior_predictive()
centered_eight_trace = pm.sample()
posterior_predictive = pm.sample_posterior_predictive(centered_eight_trace)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [theta, tau, mu]
Sampling 4 chains: 100%|██████████| 4000/4000 [00:03<00:00, 1291.48draws/s]
There were 12 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.6803070156848937, but should be close to 0.8. Try to increase the number of tuning steps.
There were 4 divergences after tuning. Increase `target_accept` or reparameterize.
There were 2 divergences after tuning. Increase `target_accept` or reparameterize.
There were 13 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.6862081649203211, but should be close to 0.8. Try to increase the number of tuning steps.
The estimated number of effective samples is smaller than 200 for some parameters.
100%|██████████| 500/500 [00:00<00:00, 4097.94it/s]
###Markdown
Most ArviZ functions work fine with `trace` objects from PyMC3:
###Code
az.plot_autocorr(centered_eight_trace, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Convert to InferenceDataFor much more powerful querying, analysis and plotting, we can use built-in ArviZ utilities to convert PyMC3 objects to xarray datasets. Note we are also giving some information about labelling.ArviZ is built to work with `InferenceData`, and the more *groups* it has access to, the more powerful analyses it can perform. Here is a plot of the trace, which is common in PyMC3 workflows. Note the intelligent labels.
###Code
data = az.from_pymc3(trace=centered_eight_trace,
prior=prior,
posterior_predictive=posterior_predictive,
coords={'school': schools},
dims={'theta': ['school'], 'obs': ['school']})
data
az.plot_trace(data);
###Output
_____no_output_____
###Markdown
Plotting with PyStan objectsArviZ is built with first class support for PyStan objects, and can plot raw `fit` objects in a reasonable manner. Here is the same centered eight schools model:
###Code
import pystan
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta[J];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta ~ normal(mu, tau);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
schools_dat = {'J': 8,
'y': [28, 8, -3, 7, -1, 1, 18, 12],
'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
sm = pystan.StanModel(model_code=schools_code, verbose=False)
fit = sm.sampling(data=schools_dat, iter=1000, chains=4)
az.plot_density(fit, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Again, converting to `InferenceData` (a netcdf datastore that loads data into `xarray` datasets), we can get much richer labelling and mixing of data. Here is a plot showing where the Hamiltonian sampler had divergences
###Code
data = az.from_pystan(fit=fit,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords={'school': schools},
dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})
data
az.plot_pair(data, coords={'school': ['Choate', 'Deerfield', 'Phillips Andover']}, divergences=True);
###Output
/home/colin/miniconda3/envs/arviz3.6/lib/python3.6/site-packages/matplotlib/figure.py:2299: UserWarning: This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.
warnings.warn("This figure includes Axes that are not compatible "
###Markdown
Quick introduction
###Code
%matplotlib inline
import arviz as az # import arviz
az.style.use('arviz-darkgrid') # This is just an alias for plt.style
###Output
_____no_output_____
###Markdown
Most ArviZ functions require a `trace` as the main argument. Currently ArviZ will work with either a PyMC3 `trace` or a Pandas DataFrame. Internally ArviZ turns the former into the latter. To show some of the functionality of ArviZ we are going to load a previously saved trace. ArviZ can save traces as (optionally compressed) csv files with `az.utils.save_trace(trace)`.
###Code
trace = az.utils.load_trace('trace.gzip')
trace.head()
###Output
_____no_output_____
###Markdown
This DataFrame comes from a model with two variables `a` and `b`. Variable `b` has shape 2 and thus we have the column names `b__0` and `b__1`. If the trace comes from a sampling process with more than one chain, like in this example, we will get duplicated column names. If you want to combine the samples from different chains you can pass `combined=True`.
###Code
az.utils.load_trace('trace.gzip', combined=True).head()
###Output
_____no_output_____
###Markdown
ArviZ plots are built on top of matplotlib, and hence you can edit them using matplotlib or change the overall aesthetic of plots by changing the styles. ArviZ is distributed with two styles, based on seaborn-darkgrid and seaborn-whitegrid respectively, which follow a similar naming pattern, i.e. `arviz-darkgrid` and `arviz-whitegrid`. The main differences are:* Default matplotlib font (no problem using Greek letters!)* Default matplotlib cmap, viridis* Larger font size for several elements* A colorblind-friendlier color cycle designed using https://colorcyclepicker.mpetroff.net/To change the style you can just do:
###Code
az.style.use('arviz-darkgrid') # This is just an alias for plt.style
###Output
_____no_output_____
###Markdown
One of the most common plots used in Bayesian statistics is the `traceplot`.
###Code
az.traceplot(trace);
###Output
_____no_output_____
###Markdown
In the left column we have the distributions of sampled values; histograms are used for discrete variables and KDEs for continuous ones. Notice that the variable `b` (with shape=2) is represented in 2 separate plots, one per axis. Chains are plotted using different colors: blue ('C0') for the first chain and orange ('C1') for the second one. On the right we have the values of the variables as a function of sampling step (1000 in this example). You can specify the variables you want to plot by passing their names to `varnames`.
###Code
az.traceplot(trace, varnames=['b']);
###Output
_____no_output_____
###Markdown
Notice that we pass `b` and we still get both axes, without needing to specify them explicitly. ArviZ is not only about plots; we can also get a text summary from the trace
###Code
az.summary(trace)
###Output
_____no_output_____
###Markdown
ArviZ Quickstart
###Code
%matplotlib inline
import arviz as az
import numpy as np
# ArviZ ships with style sheets!
az.style.use('arviz-darkgrid')
###Output
_____no_output_____
###Markdown
Get started with plottingArviZ is designed to be used with libraries like [PyStan](https://pystan.readthedocs.io) and [PyMC3](https://docs.pymc.io), but works fine with raw numpy arrays.
###Code
az.plot_posterior(np.random.randn(100_000));
###Output
_____no_output_____
###Markdown
Plotting a dictionary of arrays, ArviZ will interpret each key as the name of a different random variable. Each row of an array is treated as an independent series of draws from the variable, called a _chain_. Below, we have 10 chains of 50 draws each for four different distributions.
###Code
size = (10, 50)
az.plot_forest({
'normal': np.random.randn(*size),
'gumbel': np.random.gumbel(size=size),
'student t': np.random.standard_t(df=6, size=size),
'exponential': np.random.exponential(size=size)
});
###Output
_____no_output_____
###Markdown
Plotting with PyMC3 objectsArviZ is designed to work well with high dimensional, labelled data. Consider the [eight schools model](http://andrewgelman.com/2014/01/21/everything-need-know-bayesian-statistics-learned-eight-schools/), which roughly tries to measure the effectiveness of SAT classes at eight different schools. To show off ArviZ's labelling, I give the schools the names of [a different eight schools](https://en.wikipedia.org/wiki/Eight_Schools_Association).This model is small enough to write down, is hierarchical, uses labelling, and a centered parameterization causes [divergences](http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html) (which are interesting for illustration):
###Code
import pymc3 as pm
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
schools = np.array(['Choate', 'Deerfield', 'Phillips Andover', 'Phillips Exeter',
'Hotchkiss', 'Lawrenceville', "St. Paul's", 'Mt. Hermon'])
with pm.Model() as centered_eight:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
# This pattern is useful in PyMC3
prior = pm.sample_prior_predictive()
centered_eight_trace = pm.sample()
posterior_predictive = pm.sample_posterior_predictive(centered_eight_trace)
###Output
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [theta, tau, mu]
Sampling 4 chains: 100%|██████████| 4000/4000 [00:02<00:00, 1659.82draws/s]
There were 9 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.679274648861103, but should be close to 0.8. Try to increase the number of tuning steps.
There were 15 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.6222461372253658, but should be close to 0.8. Try to increase the number of tuning steps.
There were 32 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.5960725891337998, but should be close to 0.8. Try to increase the number of tuning steps.
There were 31 divergences after tuning. Increase `target_accept` or reparameterize.
The acceptance probability does not match the target. It is 0.7091049759437956, but should be close to 0.8. Try to increase the number of tuning steps.
The estimated number of effective samples is smaller than 200 for some parameters.
100%|██████████| 2000/2000 [00:00<00:00, 4128.69it/s]
###Markdown
Most ArviZ functions work fine with `trace` objects from PyMC3:
###Code
az.plot_autocorr(centered_eight_trace, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Convert to InferenceDataFor much more powerful querying, analysis and plotting, we can use built-in ArviZ utilities to convert PyMC3 objects to xarray datasets. Note we are also giving some information about labelling.ArviZ is built to work with `InferenceData`, and the more *groups* it has access to, the more powerful analyses it can perform. See the `InferenceData` structure specification [here](../schema/schema.html). Here is a plot of the trace, which is common in PyMC3 workflows. Note the intelligent labels.
###Code
data = az.from_pymc3(trace=centered_eight_trace,
prior=prior,
posterior_predictive=posterior_predictive,
coords={'school': schools},
dims={'theta': ['school'], 'obs': ['school']})
data
az.plot_trace(data);
###Output
###Markdown
Plotting with PyStan objectsArviZ is built with first class support for PyStan objects, and can plot raw `fit` objects in a reasonable manner. Here is the same centered eight schools model:
###Code
import pystan
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta[J];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta ~ normal(mu, tau);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
schools_dat = {'J': 8,
'y': [28, 8, -3, 7, -1, 1, 18, 12],
'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
sm = pystan.StanModel(model_code=schools_code, verbose=False)
fit = sm.sampling(data=schools_dat, iter=1000, chains=4)
az.plot_density(fit, var_names=['mu', 'tau']);
###Output
_____no_output_____
###Markdown
Again, converting to `InferenceData` (a netcdf datastore that loads data into `xarray` datasets), we can get much richer labelling and mixing of data. Here is a plot showing where the Hamiltonian sampler had divergences
###Code
data = az.from_pystan(posterior=fit,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords={'school': schools},
dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})
data
az.plot_pair(data, coords={'school': ['Choate', 'Deerfield', 'Phillips Andover']}, divergences=True);
###Output
_____no_output_____ |
pycon-pandas-tutorial/All.ipynb | ###Markdown
Years
###Code
# 1. How many movies are listed in the `titles` dataframe?
len(titles)
# 1. What is the name and year of the very first movie ever made?
titles.sort_values('year').head(1)
# 1. How many years into the future does the IMDB database list movie titles?
titles.sort_values('year').tail(3)#.year - 2015
# 1. How many movies listed in `titles` came out in 1950?
len(titles[titles.year == 1950])
# or: (titles.year == 1950).sum()
# 1. How many movies came out in 1960?
len(titles[titles.year == 1960])
# 1. How many movies came out in each year of the 1970s?
# (Hint: try a Python "for" loop.)
for y in range(1970, 1980):
print(y, (titles.year == y).sum())
# 1. How many movies came out during your own lifetime,
# from the year of your birth through 2014?
len(titles[(titles.year >= 1974) & (titles.year <= 2014)])
# 2. Use "value_counts" to determine how many movies came out
# in each year of the 1970s.
titles[titles.year // 10 == 197].year.value_counts().sort_index()
# 3. Use "groupby" to determine how many movies came out in each year of the 1970s.
titles.groupby('year').size().loc[1970:1979]
###Output
_____no_output_____
###Markdown
Titles
###Code
# 1. What are the names of the movies made through 1906?
titles[titles.year <= 1906][['title']]
# 1. What movies have titles that fall between Star Trek and Star Wars in the alphabet?
titles[(titles.title >= 'Star Trek') & (titles.title <= 'Star Wars')]
# 2. Use an index and .loc[] to find the movies whose titles fall between Star Trek
# and Star Wars in the alphabet.
t = titles.copy()
t = t.set_index('title').sort_index()
t.loc['Star Trek':'Star Wars']
# 2. Use an index and .loc[] to retrieve the names of the movies made through 1906.
titles.set_index('year').sort_index().loc[1800:1906]
# 2. What are the 15 most common movie titles in film history?
titles.title.value_counts().head(15)
# Use this for session 3?
i = cast.set_index('name').sort_index()
a = i.loc['Cary Grant',['year','n']].groupby('year').agg(['min', 'mean', 'max'])
a.loc[:1942].plot(kind='area', stacked=False)
a
# 5. What are the 5 longest movie titles ever?
pd.set_option('max_colwidth', 300)
t = titles.copy()
t['len'] = t.title.str.len()
t = t.sort_values('len', ascending=False)
t.head()
# 5. What are the 15 most popular movie titles, if you strip off the suffixes like
# (II) and (III) that the IMDB adds to distinguish movies shown in the same year?
titles.title.str.extract('^([^(]*)').value_counts().head(15)
###Output
_____no_output_____
###Markdown
How many movies actors have been in
###Code
# 1. How many movies has Judi Dench acted in?
len(cast[cast.name == 'Judi Dench'])
# 1. How many movies did Sidney Poitier appear in?
c = cast
c = c[c.name == 'Sidney Poitier']
len(c)
# 1. In how many of his movies was Sidney Poitier the lead (`n==1`)?
c = cast
c = c[c.name == 'Sidney Poitier']
c = c[c.n == 1]
len(c)
###Output
_____no_output_____
###Markdown
Pulling and displaying movie credits
###Code
# 1. List the movies, sorted by year, in which Judi Dench starred as lead actor.
c = cast
c = c[c.name == 'Judi Dench']
c = c[c.n == 1]
c.sort_values('year')
# 1. Who was credited in the 1972 version of Sleuth, in order by `n` rank?
c = cast
c = c[c.title == 'Sleuth']
c = c[c.year == 1972]
c.sort_values('n')
###Output
_____no_output_____
###Markdown
Common character names
###Code
# 2. What are the 11 most common character names in movie history?
cast.character.value_counts().head(11)
# 3. Which actors have played the role “Zombie” the most times?
c = cast
c = c[c.character == 'Zombie']
c = c.groupby('name').size().sort_values()
c.tail(5)
# 3. Which ten people have appeared most often as “Herself” over the history of film?
c = cast
c = c[c.character == 'Herself']
c = c.groupby('name').size().sort_values()
c.tail(10)
# 3. Which ten people have appeared most often as “Himself” over the history of film?
c = cast
c = c[c.character == 'Himself']
c = c.groupby('name').size().sort_values()
c.tail(10)
# 4. Take the 50 most common character names in film.
# Which are most often played by men?
c = cast
clist = c.character.value_counts().head(50)
clist.head()
clist.tail()
cast_by_character = cast.sort_values('character').set_index('character')
c = cast_by_character.loc[clist.index][['type']]
c = c.reset_index()
c = c.groupby(['character', 'type']).size()
c = c.unstack()
c['ratio'] = c.actress / (c.actor + c.actress)
c = c.sort_values('ratio')
c.head()
# 4. …which of those 50 characters are most often played by women?
c.tail()
# 4. …which of those 50 characters have a ratio closest to 0.5?
c[(c.ratio > 0.4) & (c.ratio < 0.6)]
###Output
_____no_output_____
###Markdown
Who has been in the most movies
###Code
# 2. Which actors or actresses appeared in the most movies in the year 1945?
cast[cast.year == 1945].name.value_counts().head(10)
# 2. Which actors or actresses appeared in the most movies in the year 1985?
cast[cast.year == 1985].name.value_counts().head(10)
%%time
# 2. Create a `cast_by_title_year` dataframe indexed by title and year
# to use in the next few questions.
cast_by_title_year = cast.set_index(['title', 'year']).sort_index()
cast_by_title_year.head()
%%time
# 2. Use `cast_by_title_year` to find the stars of the film Inception
# and order them by `n` before displaying the top 10.
cast_by_title_year.loc['Inception'].sort_values('n').head(10)
# 2. Use `cast_by_title_year` to find the first 10 stars in the 1996 film Hamlet,
# and order them by `n`.
cast_by_title_year.loc['Hamlet',1996].sort_values('n').head(10)
%%time
# 2. Write a `for` loop that, for the top 9 actors in the 1977 movie Star Wars,
# determines how many movies they starred in after 1977.
names = cast_by_title_year.loc['Star Wars',1977].sort_values('n').head(9).name
for name in names:
print(name, len(cast[(cast.name == name) & (cast.year > 1977)]))
# 2. Create an indexed version of `cast` that, once built, lets you answer
# the previous question with a `for` loop that finishes in under a second.
i = cast.set_index('name').sort_index()
%%time
for name in names:
c = i.loc[name]
c = c[c.year > 1977]
#c = c[(c.character != 'Himself') & (c.character != 'Herself')]
print(name, len(c))
# 3. How many people were cast in each of the movies named "Hamlet”?
c = cast
c = c[c.title == 'Hamlet']
c = c.groupby('year').size()
c
# 5. How many actors are in the cast of each version of Hamlet,
# including Hamlets with IMDB name collisions like "Hamlet (II)"
# and "Hamlet (III)"? [BAD]
c = cast_by_title_year
# c.loc['Hamlet':'Hamlet (Z'].index.value_counts() - Drat
# c.loc['Hamlet':'Hamlet (Z'].groupby(level=0).size() - Drat
# c.loc['Hamlet':'Hamlet (Z'].groupby(level=1).size() - Drat
c.loc['Hamlet':'Hamlet (Z'].groupby(level=[0,1]).size()
# Or:
#c = cast[(cast.title >= 'Hamlet') & (cast.title < 'Hamlet (Z')]
#c.groupby(['title', 'year']).size()
###Output
_____no_output_____
###Markdown
Actors and Actresses
###Code
# 4. Build a dataframe with a row for each year with two columns:
# the number of roles for actors in that year's films,
# and the number of roles for actresses.
aa = cast[['year', 'type']].groupby(['year', 'type']).size()
aa = aa.loc[:2014].unstack()
aa.head()
# 4. Use that dataframe to make a kind='area' plot showing the total
# number of roles available over the history of film.
aa.plot(kind='area')
f = aa.actor / (aa.actor + aa.actress)
f.plot(ylim=[0,1], kind='area')
c = cast
#c = c[c.year // 10 == 198]
c = c[c.n <= 3]
c = c.groupby(['year', 'type', 'n']).size()
c = c.unstack(1)
c.swaplevel(0,1).loc[1].plot(ylim=0, kind='area')
#f = c.actor / (c.actor + c.actress)
#f = f.unstack()
#f.plot(ylim=[0,1])
###Output
_____no_output_____
###Markdown
Rank over time
###Code
# 2. Define “leading actor” as an actor or actress whose `n==1`
# and “supporting actor” as `n==2` — what is the average year
# of all the supporting roles Judi Dench has had?
c = cast
c = c[c.name == 'Judi Dench']
print(c[c.n == 2].year.mean())
# 2. What is the average year of Judi Dench’s leading roles —
# is her career moving forwards toward leading roles
# or backwards towards supporting ones?
print(c[c.n == 1].year.mean())
# 2. Did Sidney Poitier move forward or back over his career?
c = cast
c = c[c.name == 'Sidney Poitier']
print(c[c.n == 2].year.mean())
print(c[c.n == 1].year.mean())
# 2. What about Michael Caine?
c = cast
c = c[c.name == 'Michael Caine']
print(c[c.n == 2].year.mean())
print(c[c.n == 1].year.mean())
c = cast
#c = c[c.year // 10 == 195]
c = c[c.n.notnull()].groupby('name').n.agg(['size', 'mean'])
c.head()
c = c[c['size'] >= 10]
c = c.sort_values('mean')
c.head(60)
###Output
_____no_output_____
###Markdown
Release dates
###Code
release_dates.head()
# 5. In which month is a movie whose name starts with the text
# "The Lord of the Rings" most likely to be released?
r = release_dates
r = r[r.title.str.startswith('The Lord of the Rings')]
r = r[r.country == 'USA']
r.date.dt.month.value_counts()
# 5. In which months is a movie whose name ends in the word "Christmas"
# most likely to be released?
r = release_dates
r = r[r.title.str.endswith('Christmas')]
r = r[r.country == 'USA']
r.date.dt.month.value_counts()
rd = release_dates.set_index(['title', 'year']).sort_index()
rd.head()
rd.loc[[('#Beings', 2015), ('#Horror', 2015)]]
c = cast
c = c[c.name == 'Tom Cruise'][['title', 'year']].drop_duplicates()
#c = c.join(rd, ['title', 'year'])
#c = c[c.country == 'USA']
#c.date.dt.month.value_counts().sort_index().plot(kind='bar')
c.values
# ASK
# rd.loc[c]
# rd.loc[c.values]
# rd.loc[list(c.values)]
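# Editorial note (not from the original tutorial): one way to resolve the ASK above is to
# look up the MultiIndex with a list of (title, year) tuples, e.g.
#     rd.loc[[tuple(v) for v in c.values]]
# though titles missing from rd raise a KeyError in recent pandas; the join used in the
# next cells, c.join(rd, ['title', 'year']), is usually the cleaner approach.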
# 5. In what months of the year have Helen Mirren movies been most often released?
c = cast
c = c[c.name == 'Helen Mirren'][['title', 'year']].drop_duplicates()
c = c.join(rd, ['title', 'year'])
c = c[c.country == 'USA']
c.date.dt.month.value_counts().sort_index().plot(kind='bar')
# 5. …Jeff Bridges movies?
c = cast
c = c[c.name == 'Jeff Bridges'][['title', 'year']].drop_duplicates()
c = c.join(rd, ['title', 'year'])
c = c[c.country == 'USA']
c.date.dt.month.value_counts().sort_index().plot(kind='bar')
# 5. …Tom Cruise movies?
c = cast
c = c[c.name == 'Tom Cruise'][['title', 'year']].drop_duplicates()
c = c.join(rd, ['title', 'year'])
c = c[c.country == 'USA']
c.date.dt.month.value_counts().sort_index().plot(kind='bar')
%%time
# 5. Use join() to build a table of release dates indexed by actor,
# and use it to re-run the previous three questions efficiently.
c = cast
c = c[['name', 'title', 'year']]
c = c.join(rd, ['title', 'year'])
c = c[c.country == 'USA']
c = c.set_index('name').sort_index()
releases = c
releases.head()
releases.loc['Tom Cruise'].date.dt.month.value_counts().sort_index().plot(kind='bar')
# pivot(self, index=None, columns=None, values=None)
cast.head()
c = cast
c = c[c.year >= 1990]
c = c[c.year <= 1993]
c = c[c.name == 'George Clooney']
#c = c[c.title == 'Inception']
#c = c[c.n.notnull()]
#c = c.pivot('name', 'year', 'title')
c.fillna('')
release_dates.head()
r = release_dates
r = r[r.title.str.startswith('Star Wars: Episode')]
r = r[r.country.str.startswith('U')]
r.pivot('title', 'country', 'date')
#r.pivot('country', 'title', 'date')
r = release_dates
r = r[r.title.str.startswith('Star Wars: Episode')]
r = r[r.country.str.startswith('U')]
r.set_index(['title', 'country'])[['date']].unstack()
cast.head()
t = titles
t.head()
c = cast
c = c[c.title == 'Hamlet']
c = c.set_index(['year', 'character'])#.unstack('type')
c
c = cast
c = c[c.title == 'Hamlet']
c = c.set_index(['year', 'type'])#.unstack('type')
c
###Output
_____no_output_____ |
data_analysis/audiocommons_ffont/tempo_estimation/07_classifier_of_good_and_bad_bpm_estimates.ipynb | ###Markdown
Classifier for good and bad BPM estimates
###Code
def condition_good_estimate(key, item, data):
result = metric_close_bpm(data, METHOD, tolerance=0, sound_ids=[key])
if len(result) > 0 and result[0] == 1:
return True
else:
return False
def condition_wrong_estimate(key, item, data):
result = metric_close_bpm(data, METHOD, tolerance=0, sound_ids=[key])
if len(result) > 0 and result[0] == 0:
return True
else:
return False
base = 'analysis.FS_onset_rate_count'
features = [
('onset_rate', '%s.rhythm.onset_rate' % base),
('onset_count', '%s.rhythm.onset_count' % base),
]
def return_feature_vector(data_item):
vector = list()
for fname, fpath in features:
value = vfkp(data_item, fpath)
if value is None or not np.isfinite(value):
return None  # signal the caller to skip vectors with nan or inf values
vector.append(value)
return vector
for count, dataset in enumerate(datasets):
print title('Training classifier for %s' % dataset.name)
# Separate good and bad estimates
correctly_estimated = dataset.filter_data(condition=condition_good_estimate)
wrongly_estimated = dataset.filter_data(condition=condition_wrong_estimate)
# Prepare data to feed classifier
X = list() # List of feature vectors
y = list() # List of good and bad estimates labels
for item in correctly_estimated.data.values():
feature_vector = return_feature_vector(item)
if feature_vector is not None: # Skip vectors with nan or inf values
X.append(feature_vector)
y.append('good estimate')
for item in wrongly_estimated.data.values():
feature_vector = return_feature_vector(item)
if feature_vector is not None: # Skip vectors with nan or inf values
X.append(feature_vector)
y.append('bad estimate')
# Train SVM
print "Training and evaluating linear SVM classifier..."
svm_clf = svm.SVC(kernel='linear')
print ' Accuracy: %.2f' % np.mean(cross_validation.cross_val_score(svm_clf, X, y, scoring='accuracy', cv=10))
# Train decision tree with different depths
for depth in [1, 2, 3, 4, 5, 10]:
print "Training and evaluating decision tree classifier (depth=%i)..." % depth
tree_clf = tree.DecisionTreeClassifier(max_depth=depth)
print ' Accuracy: %.2f' % np.mean(cross_validation.cross_val_score(tree_clf, X, y, scoring='accuracy', cv=10))
# Training decision tree for export (gets better accuracy and is easier to interpret)
fitted_tree_clf = tree_clf.fit(X, y)
# Export classifier output in dot format (for further inspection)
with open(os.path.join(settings.TEMPO_ESTIMATION_OUT_PATH, 'tree_clf_%s_depth_%i.dot' % (dataset.short_name, depth)), 'w') as f:
f = tree.export_graphviz(fitted_tree_clf, feature_names=[fname for fname, fpath in features], out_file=f)
# Export classifier as pickle so we can load it later
joblib.dump(fitted_tree_clf, os.path.join(settings.TEMPO_ESTIMATION_OUT_PATH, 'tree_clf_%s_depth_%i.pkl' % (dataset.short_name, depth)))
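# (added sketch) the pickled tree can be reloaded later and used to flag new sounds; the feature
# values below are made-up placeholders for [onset_rate, onset_count]:
# loaded_clf = joblib.load(os.path.join(settings.TEMPO_ESTIMATION_OUT_PATH, 'tree_clf_%s_depth_%i.pkl' % (dataset.short_name, depth)))
# loaded_clf.predict([[5.0, 120]])  # -> ['good estimate'] or ['bad estimate']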
# Transform .dot data into pdfs using GraphViz (required dot command line tool, see example in http://scikit-learn.org/stable/modules/tree.html#classification)
from ac_utils.graph import simplify_dot_tree
for filename in os.listdir(settings.TEMPO_ESTIMATION_OUT_PATH):
if filename.endswith('.dot') and not '.simp' in filename:
in_filename = os.path.join(settings.TEMPO_ESTIMATION_OUT_PATH, filename)
in_filename = simplify_dot_tree(in_filename, ['wrong estimate', 'good estimate'])
out_filename = os.path.join(settings.TEMPO_ESTIMATION_OUT_PATH, filename.replace('.dot', '.pdf'))
os.system('dot -Tpdf %s -o %s' % (in_filename, out_filename))
###Output
_____no_output_____ |
geomodeling/implicit_circle.ipynb | ###Markdown
Implicit circle
###Code
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from itertools import product, combinations
fig = plt.figure(figsize=(12,12))
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
# draw cube
r = [-1, 1]
for s, e in combinations(np.array(list(product(r, r, r))), 2):
if np.sum(np.abs(s-e)) == r[1]-r[0]:
ax.plot3D(*zip(s, e), color="b")
# draw sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
ax.plot_wireframe(x, y, z, color="r")
# draw a point
ax.scatter([0], [0], [0], color="g", s=100)
# draw a vector
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
# add surface
point = np.array([1, 0, 0])
normal = np.array([0, 0, 1])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. /normal[2]
ax.plot_surface(xx, yy, z)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
point = np.array([1, 2, 3])
normal = np.array([0, 0, 1])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. /normal[2]
# plot the surface
plt3d = plt.figure(figsize=(12,12)).gca(projection='3d')
plt3d.plot_surface(xx, yy, z)
# draw sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
plt3d.plot_wireframe(x, y, z, color="r")  # plot on this figure's axes (ax belongs to the previous cell)
plt.show()
###Output
_____no_output_____ |
_notebooks/2021-05-06-sphinx-algorithm-overview.ipynb | ###Markdown
Sphinx Algorithm Explained> Explaining how the SPHINX password scheme works and why it's magic- toc: true - badges: true- comments: true- categories: [jupyter,security,sphinx,somethingAwesome] > youtube: https://youtu.be/jta72Zj-l14
###Code
import ecdsa.ellipticcurve as ecc
import ecdsa.numbertheory as nt
###Output
_____no_output_____
###Markdown
Preliminary: Diffie Hellman Shared SecretElliptic Cryptography is based on the [Discrete Logarithm Problem](https://jeremykun.com/2014/03/31/elliptic-curve-diffie-hellman/). In essence, we can simplify the idea to> Adding is easy on elliptic curves, but **undoing** addition seems hardMore formally, we can give the following definition: Let $G$ be an additive group, and let $x,y$ be elements of $G$ so that $x=ny$ for some integer $n$. The Discrete Logarithm Problem asks one to find $n$ when given $x$ and $y$.In integers, this problem is quite easy.If I have:- $x=12$- $y=4185072$Then, I can compute that $y=4185072=348756*12=348756x \rightarrow n=348756$.> Division for integers is efficient, **but for elliptic curves this is not the case**.Here is a toy problem testing my understanding of DH's shared secret protocol.
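Before that toy problem, here is a one-line sanity check (an added sketch, not part of the original write-up) that the integer version really is just division:

```python
x = 12
y = 4185072
n = y // x        # 348756: plain integer division recovers n immediately
assert n * x == y
```

No comparably cheap way to "divide" points is known on an elliptic curve, which is exactly the asymmetry the constructions below rely on.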
###Code
# Diffie Hellman Shared Secret POC
def sendDH(privateKey, generator, sendFunction):
return sendFunction(privateKey * generator)
def receiveDH(privateKey, receiveFunction):
return privateKey * receiveFunction()
prime = 3851
a = 324
b = 1287
myCurve = ecc.CurveFp(prime, a, b, 1)
basePoint = ecc.Point(myCurve, 920, 303)
aliceSecretKey = 233 # generateSecretKey(8)
bobSecretKey = 25 # generateSecretKey(8)
alicePublicKey = sendDH(aliceSecretKey, basePoint, lambda x:x)
bobPublicKey = sendDH(bobSecretKey, basePoint, lambda x:x)
sharedSecret1 = receiveDH(bobSecretKey, lambda: alicePublicKey)
sharedSecret2 = receiveDH(aliceSecretKey, lambda: bobPublicKey)
print(myCurve)
print(basePoint)
print('Shared secret is %s == %s' % (sharedSecret1, sharedSecret2))
###Output
CurveFp(p=3851, a=324, b=1287, h=1)
(920,303)
Shared secret is (1001,3826) == (1001,3826)
###Markdown
Algorithm OverviewSteps to implement1. Hashing password into elliptic curve - PWD entered by user - SHA-256 computation, hashing into the NIST P-256 Curve - this is computed on input + iteration_counter $\rightarrow Z_q$. - computed value is considered $x$ coord of a point on curve if $y$-value is associated with it is a quadratic residue (i.e. $x,y$ satisfy the curve equation). - this is repeated until a curve element is obtained. (which is the output) - Password is concatenated with domain name and input into H'. (ADD RESISTANCE AGAINST PHISHING)2. FK-PTR OPRF protocol 1. EXTENSION: - Blind the password with OPRF - OPRF: $F_k(x)=H(x,(H'(x))^k)$ - input $x$ from client - $k$ is from device - H maps from arbitrary length string $\rightarrow e \in \{0,1\}^\tau$, $\tau$ is a security parameter - Looking at the formula for OPRF, we assume that H(xbytearray, point in G) = $H(x || P_x || P_y)$ - H' maps from arbitrary length string $\rightarrow g \in G$ - H' is the "Hash into Elliptic Curve" function, which maps the password into a point on NIST P-256 curve - works over a group $G$ of prime order $p$ (e.g. NIST P-256 group) - extension picks a random number $\rho \in Z_q$ and raises the hash value of the input to the power $\rho$. - this blinding factor $\rho$ hides the password with information-theoretic security) SEND THIS AS $\alpha$ 2. DEVICE - check if $\alpha$ is $\in G$ - compute and SEND BACK $\beta = \alpha^k$ 3. BACK TO EXTENSION - check if $\beta$ is $\in G$ - raise the recieved value to the power of $\rho^{-1} \in Z_q$ - then compute the SHA-256 hash of calculated value. 4. BACK TO RWD PASSWORD - (same as PwdHash implementation) - encoded to a random combination of letters, numbers and symbols matching the passwrod requirement of the visited website and entered into pwd field of login page. N.B.- taking the exponential in a group is just repetition of the operation - i.e. $\forall x \in G, n \in Z, x^n = \underbrace{x+...+x}_\text{n times}$. - see this [crypto stack exchange thread](https://crypto.stackexchange.com/questions/57768/exponentiation-in-ecc)- how to take the inverse power in a group - Raise the point to the power $a^{-1} \in Z_p$ - this is fairly easy to do with [euclidean algorithm](https://en.wikipedia.org/wiki/Modular_multiplicative_inverse) - see [stack exchange link](https://math.stackexchange.com/questions/471269/point-division-in-elliptic-curve-cryptography) for more info- Instantiation assumes a cyclic group $G$ of prime order $q$, $|q|=\tau$, with generator $g$. - At init, User chooses master password pwd, while Device chooses and stores $k \leftarrow Z_q$.- H which looks like it accepts two arguments when it's called in $H(x, (H'(x))^k)$ really just means hash it all at once, by appending it. 
- in the paper, it describes this step as "Client hashes this value $(H'(pwd|domain))^k$ with the pwd to obtain rwd" - in the implementation, they use `crypto_generichash_update()` from [source.](https://github.com/stef/libsphinx/blob/master/src/sphinx.c) - furthermore, in the documentation for `` they use this function to compute the hash on a multi-part [example](https://libsodium.gitbook.io/doc/hashing/generic_hashingmulti-part-example-with-a-key) - there is a reddit thread which also explains how the function used can be used to hash variable length things such as a [stream](https://www.reddit.com/r/crypto/comments/7ooot2/using_libsodiums_generic_hash_to_hash_a_file/)- the notation of $\{0,1\}^\tau$ means a bit string of length $\tau$ - From this [paper on hashing into Elliptic Curves](https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-01page-4), they describe the following notation: - "bitstring of arbitrary length is denoted as $\{0, 1\}^*$"- octet means base-256, and not to be confused with octal which is base-8 Curve definitionUsing the primitives from `ecdsa` package in python, we will create the following curve based on the parameters for P-256.
###Code
# https://github.com/warner/python-ecdsa/blob/333ee3feb1dfc6797db7a83d221e5a3a9fafdc3f/src/ecdsa/ecdsa.py
# NIST Curve P-256:
# ORDER = 115792089210356248762697446949407573529996955224135760342422259061068512044369
PRIME = 115792089210356248762697446949407573530086143415290314195533631308867097853951
R = 115792089210356248762697446949407573529996955224135760342422259061068512044369
# s = 0xc49d360886e704936a6678e1139d26b7819f7e90L
# c = 0x7efba1662985be9403cb055c75d4f7e0ce8d84a9c5114abcaf3177680104fa0dL
A = -3
B = 0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B
Gx = 0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296
Gy = 0x4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5
curve_256 = ecc.CurveFp(PRIME, A, B, 1)
curve_256_generator = ecc.PointJacobi(curve_256, Gx, Gy, 1, R, generator=True)
###Output
_____no_output_____
###Markdown
1. Hashing password into elliptic curve 1.1 HashToBase ImplementationHere is a definition of the function:HashToBase(x): $H(x)[0:log_2(p) + 1]$, i.e., hash-truncate-reduce, where H is a cryptographic hash function, such as SHA256, and $p$ is the prime order of base field $F_p$.Here is some psuedo code:```HashToBase(x, i)Parameters: H - cryptographic hash function to use hbits - number of bits output by H p - order of the base field Fp label - context label for domain separationPreconditions: floor(log2(p)) + 1 >= hbitsInput: x - value to be hashed, an octet string i - hash call index, a non-negative integerOutput: y - a value in the field FpSteps: 1. t1 = H("h2c" || label || I2OSP(i, 4) || x) 2. t2 = OS2IP(t1) 3. y = t2 (mod p) 4. Output ywhere I2OSP, OS2IP [RFC8017] are used to convert an octet string toand from a non-negative integer, and a || b denotes concatenation ofa and b.```
###Code
from binascii import hexlify, unhexlify
from hashlib import sha1, sha256, sha384, sha512
import hashlib
from ecdsa import NIST256p
# http://www.secg.org/sec2-v2.pdf
# print(NIST256p.oid)
ORDER = NIST256p.order
# https://github.com/bdauvergne/python-pkcs1/blob/master/pkcs1/primitives.py
def OS2IP(x: str) -> int:
'''Converts the byte string x representing an integer reprented using the
big-endian convient to an integer.
'''
h = hexlify(x) #.binascii
return int(h, 16)
# https://github.com/bdauvergne/python-pkcs1/blob/master/pkcs1/primitives.py
def I2OSP(x: int, x_len: int = 4) -> str:
'''Converts the integer x to its big-endian representation of length
x_len.
'''
if x > 256**x_len:
raise ValueError("Integer too large.")
h = hex(x)[2:]
if h[-1] == 'L':
h = h[:-1]
if len(h) & 1 == 1:
h = '0%s' % h
x = unhexlify(h) #.binascii
return b'\x00' * int(x_len-len(x)) + x
print("OCT TEST: I2OSP(23) = {}, and then OS2IP(I2OSP(23)) = {}".format(I2OSP(23, 4), OS2IP(I2OSP(23, 4))))
# https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-02#appendix-C.5
def HashToBase(x: bytearray, i: int, label: str="label", p: int=ORDER) -> int:
'''Hashes the bytearray x with a label string, the hash call index i, and
returns y, a value in the field F_p
'''
H = sha256()
toHash = ["h2c", label, I2OSP(i, 4), x]
H.update(b"hc2")
H.update(label.encode())
# H.update(I2OSP(i,4))
H.update(str(i).encode())
H.update(x)
t1 = H.digest()
t2 = OS2IP(t1)
return (t2 % p) # = y
valueToBeHashed = 23
hashCallIndex = 11
print(PRIME)
print(ORDER)
print("HashToBase(I2OSP({})={}, {}) = {}".format(valueToBeHashed,
I2OSP(valueToBeHashed),
hashCallIndex,
HashToBase(I2OSP(valueToBeHashed), hashCallIndex)
))
###Output
OCT TEST: I2OSP(23) = b'\x00\x00\x00\x17', and then OS2IP(I2OSP(23)) = 23
115792089210356248762697446949407573530086143415290314195533631308867097853951
115792089210356248762697446949407573529996955224135760342422259061068512044369
HashToBase(I2OSP(23)=b'\x00\x00\x00\x17', 11) = 52666019840208479355183598407159392888009956878866650321053742936543132317912
###Markdown
1.2 Simplified SWU Method (5.2.3.)As per the [hashing into elliptic curves paper](https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-02section-5.2.3), for P-256 curve, we should use Simple SWU. ```The following map2curve_simple_swu(alpha) implements the simplifiedShallue-Woestijne-Ulas algorithm from [SimpleSWU]. This algorithmworks for any curve over F_{p^n}, where p = 3 mod 4, including:o P256o ...```Given curve equation $g(x) = x^3 + Ax + B$, this algorithm works as follows: 1. t = `HashToBase(\alpha)` 2. $\alpha = \frac{-b}{a} * (1+\frac{1}{t^4 + t^2})$ 3. $\beta = -t^2 * \alpha$ 4. If $g(\alpha)$ is square, output $(\alpha, \sqrt{g(\alpha)})$ 5. Output $(\beta, \sqrt{g(\beta)})$The following procedure implements this algorithm. It outputs a point with affine coordinates. It requires knowledge of A and B, the constants from the curve Weierstrass form.```map2curve_simple_swu(alpha) Input: alpha - value to be encoded, an octet string Output: (x, y) - a point in E Steps: 1. t = HashToBase(alpha) 2. alpha = t^2 (mod p) 3. alpha = alpha * -1 (mod p) 4. right = alpha^2 + alpha (mod p) 5. right = right^(-1) (mod p) 6. right = right + 1 (mod p) 7. left = B * -1 (mod p) 8. left = left / A (mod p) 9. x2 = left * right (mod p) 10. x3 = alpha * x2 (mod p) 11. h2 = x2 ^ 3 (mod p) 12. i2 = x2 * A (mod p) 13. i2 = i2 + B (mod p) 14. h2 = h2 + i2 (mod p) 15. h3 = x3 ^ 3 (mod p) 16. i3 = x3 * A (mod p) 17. i3 = i3 + B (mod p) 18. h3 = h3 + i3 (mod p) 19. y1 = h2 ^ ((p + 1) / 4) (mod p) 20. y2 = h3 ^ ((p + 1) / 4) (mod p) 21. e = CTEQ(y1 ^ 2, h2) // Constant-time equality 22. x = CMOV(x2, x3, e) // If e = 1, choose x2, else choose x3 23. y = CMOV(y1, y2, e) // If e = 1, choose y1, else choose y2 24. Output (x, y)``` Helper functions```o CMOV(a, b, c): If c = 1, return a, else return b. Common software implementations of constant-time selects assume c = 1 or c = 0. CMOV may be implemented by computing the desired selector (0 or 1) by ORing all bits of c together. The end result will be either 0 if all bits of c are zero, or 1 if at least one bit of c is 1.o CTEQ(a, b): Returns a == b. Inputs a and b must be the same length (as bytestrings) and the comparison must be implemented in constant time.```
###Code
print("CHECK: ensure p = {} = 3 mod 4, {}mod4 = 3mod4: {}\n".format(PRIME, PRIME%4, PRIME%4==3))
# https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-02#section-5.2.3
# Implementation of H' which maps from bytearray -> g \in G
def map2curve_simple_swu(alpha: bytearray) -> (int, int):
'''Maps the octet bytearray alpha into the elliptic curve, and returns a
point from the elliptic curve.
'''
t = HashToBase(alpha, 1)
alpha = pow(t, 2, PRIME)
alpha = -alpha % PRIME
right = (pow(alpha, 2, PRIME) + alpha) % PRIME
right = pow(right, PRIME-2, PRIME) # right^(-1) % PRIME
right = (right + 1) % PRIME
left = -B % PRIME
left = (left * pow(A, PRIME-2, PRIME)) % PRIME # (left * A^-1) % PRIME
x2 = (left * right) % PRIME
x3 = (alpha * x2) % PRIME
h2 = pow(x2, 3, PRIME) # x2 ^ 3 % PRIME
i2 = (x2 * A) % PRIME
i2 = (i2 + B) % PRIME
h2 = (h2 + i2) % PRIME
h3 = pow(x3, 3, PRIME) # x3 ^ 3 % PRIME
i3 = (x3 * A) % PRIME
i3 = (i3 + B) % PRIME
h3 = (h3 + i3) % PRIME
y1 = pow(h2, (PRIME+1) // 4, PRIME) # h2 ^ ((p + 1) / 4) % PRIME
y2 = pow(h3, (PRIME+1) // 4, PRIME) # h3 ^ ((p + 1) / 4) % PRIME
if pow(y1, 2, PRIME) == h2:
return ecc.Point(curve_256, x2, y1)
else:
return ecc.Point(curve_256, x3, y2)
# Implemented via the Simple SWU paper: https://eprint.iacr.org/2009/340.pdf
# 1. alpha = -t^2
# 2. X2 = -B/A * (1 + 1/(alpha^2 + alpha))
# 3. X3 = alpha*X2
# 4. h2 = g(X2), h3 = g(x3), if g(x) = x^3 + Ax + B
# 5. if h2 is square, return (X2, sqrt(g(X2))), else return (X3, sqrt(g(X3)))
def my_swu(alpha: bytearray, debug: bool=False) -> (int, int):
# 1. alpha = -t^2
t = HashToBase(alpha,1)
print("0. HashToBase(alpha)= \t\t\t", t) if debug else None
alpha = (-pow(t, 2, PRIME)) % PRIME
print("1. alpha=-t^2= \t\t\t\t", alpha) if debug else None
#X2 = -B/A * (1 + 1/(alpha^2 + alpha))
X2_left = -B % PRIME
X2_left = (X2_left * pow(A, PRIME-2, PRIME)) % PRIME
# X2_left = 52283484311836130297341192243151613979733528143761346583456295874302188418414
print("2.1 X2_left=-B/A= \t\t\t", X2_left) if debug else None
X2_right = (alpha+1) % PRIME
X2_right = (X2_right*alpha) % PRIME
X2_right = pow(X2_right, PRIME-2, PRIME)
X2_right = (X2_right + 1) % PRIME
print("2.2 X2_right=1+1/(alpha^2+alpha)= \t", X2_right) if debug else None
X2 = (X2_left * X2_right) % PRIME
print("2.3 X2= \t\t\t\t", X2) if debug else None
# X3 = alpha*X2
X3 = (alpha*X2) % PRIME
print("3. X3=alpha*X2= \t\t\t", X3) if debug else None
# h2 = g(X2), h3 = g(x3), if g(x) = x^3 + Ax + B
h2 = (pow(X2, 3, PRIME) + (A * X2)%PRIME + B) % PRIME
h3 = (pow(X3, 3, PRIME) + (A * X3)%PRIME + B) % PRIME
print("4.1 g(X2)= \t\t\t\t", h2) if debug else None
print("4.2 g(X3)= \t\t\t\t", h3) if debug else None
sh2 = pow(h2, (PRIME+1)//4, PRIME)
sh3 = pow(h3, (PRIME+1)//4, PRIME)
print("5.1 sqrt(g(X2))= \t\t\t", sh2) if debug else None
print("5.2 sqrt(g(X3))= \t\t\t", sh3) if debug else None
if pow(sh2, 2, PRIME) == h2:
print("X2, sh2^2 = h2") if debug else None
return ecc.Point(curve_256, X2, sh2)
else:
print("X3, sh3^2 = h3? {}".format(pow(sh3,2,PRIME) == h3)) if debug else None
return ecc.Point(curve_256, X3, sh3)
###Output
CHECK: ensure p = 115792089210356248762697446949407573530086143415290314195533631308867097853951 = 3 mod 4, 3mod4 = 3mod4: True
###Markdown
1.2.1 Testing CorrectnessComparing the `map2curve_simple_swu` implementation against the hand-rolled `my_swu`, and showing that both successfully hash every input in `range(0, test_cases)` into the curve.
###Code
test_cases = 30
correct = 0
correct_list = []
for test in range(test_cases):
p = map2curve_simple_swu(I2OSP(test))
if curve_256.contains_point(p.x(), p.y()):
correct += 1
correct_list.append(test)
print("correct: {}, are {}".format(correct, correct_list))
test_cases = 30
correct = 0
correct_list = []
for test in range(test_cases):
p = my_swu(I2OSP(test))
if curve_256.contains_point(p.x(), p.y()):
correct += 1
correct_list.append(test)
print("correct: {}, are {}".format(correct, correct_list))
###Output
correct: 30, are [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
###Markdown
1.3 Usage of SWU
###Code
# Example of hashing from password to point on curve
print(map2curve_simple_swu(b"passwordwww.facebook.com"))
# or you can use the og swu implementation
print(my_swu(b"passwordwww.facebook.com"))
###Output
(50560896577634328033374378148020771698070026914100043083631439008968958738601,96434689098651036750287593872426977420942250920898794682863763170885260996734)
(50560896577634328033374378148020771698070026914100043083631439008968958738601,96434689098651036750287593872426977420942250920898794682863763170885260996734)
###Markdown
2 SPHINX architecture steps to reproduceNotes:- x is a byte array- $g \in G$1. client(x: bytearray) -> Point: - takes $x=$masterpassword || domainname - calculates $H'(x)$: bytearray -> $P(x,y) \in G$ - picks a random number $\rho$ - calculates and returns the blinded result $\alpha = H'(x)^\rho$2. device($\alpha$: Point) -> Point: - checks if $\alpha$ is in group $G$ - retrieves (or creates and store) $d$ in database - calculates and returns $\beta = \alpha^d$3. client(\beta: Point) -> bytearray: - checks if $\beta$ is in group $G$ - calculates $\beta^\frac{1}{\rho}$ and unblinds the result - calculates and returns H(x || \beta^(1/\rho))
###Code
# 1 client:
x = "masterpasswordwww.google.com"
hdashx = map2curve_simple_swu(x.encode())
print("H'(x):")
print(hdashx)
print(hex(hdashx.x()), hex(hdashx.y()))
rho = 23 # generate a random number here
alpha = hdashx * rho # hdashx^rho
print("alpha = (H'(x))^rho:")
print(alpha)
print(hex(alpha.x()), hex(alpha.y()))
# 2 device:
assert (curve_256.contains_point(alpha.x(), alpha.y()) == True)
d = HashToBase(b"some random way to produce this d key", 1)
print("d = ", d)
print(hex(d))
beta = d * alpha
print("beta = alpha^d:")
print(beta)
print(hex(beta.x()), hex(beta.y()))
# 3 client:
assert (curve_256.contains_point(beta.x(), beta.y()) == True)
# n.b.
print("rho^-1 * rho = ",pow(rho, PRIME-2, PRIME) * rho % PRIME)
final = beta * pow(rho, ORDER-2, ORDER)
print("final = beta^(1/rho)")
print(final)
print(hex(final.x()), hex(final.y()))
# check correctness
check_final = hdashx * d
print(check_final)
assert (curve_256.contains_point(check_final.x(), check_final.y()) == True)
print("Check that this result `check final` equals `final`:")
print (check_final == final)
# Finally, hash this result
# Oblivious Psuedo-Random Function
def OPRF(x: str, point: ecc.Point) -> bytearray:
'''Performs the actual Hash H of H(x, (H'(X))^d), which is the hash of a
bytearray x and a Point on the curve. Returns a bytearray result.
'''
H = sha256()
H.update(x.encode())
H.update(I2OSP(point.x(), 256))
H.update(I2OSP(point.y(), 256))
return H.digest()
rwdbytes = OPRF(x, final)
print(rwdbytes, len(rwdbytes))
# convert this to a password
import os
def gen_password(rwd: bytearray, length: int=32, charset: str="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()") -> str:
'''Generates the password based on the result of the OPRF function
'''
len_charset = len(charset)
indices = [int(len_charset * (ord(chr(byte)) / 256.0)) for byte in rwd]
return "".join([charset[index] for index in indices])
print("Your facebook password is: ", gen_password(rwdbytes))
###Output
b"\xec\xf8J\xf9\xd7\xe6\xa9e\xa5h\x1duj\x16\x91''\xd4\xe4\x89\x1c\xc9\xef\xfeo}\x17\xb7\x10\x8d.$" 32
Your facebook password is: %*U(8#vcudIgdGoKK7#mH4^)fjGzEnMK
###Markdown
2.1 OverviewHere all of the basic functionality of each part is captured as a function. The functions follow the naming convention "AToB", where A is the current entity (Client, Device), as shown below.
###Code
x = "masterpasswordwww.google.com"
# Client 1
def clientToPoint(x: str) -> (ecc.Point, int):
'''input the master password pwd and returns a point on the curve alpha
with the random integer that was used to blind it.
'''
hdashx = map2curve_simple_swu(x.encode())
rho = OS2IP(os.urandom(32))
return hdashx * rho, rho # alpha = hdashx^rho, together with the blinding factor rho
# Device
def deviceToClient(alpha: ecc.Point, index: int=1) -> ecc.Point:
'''input the point on the curve. If it is in the Group, we store
a random key D that corresponds to this point, and return the point
exponeniated to D.
'''
if curve_256.contains_point(alpha.x(), alpha.y()) != True:
return 0
print("ALPHAS: ", hex(alpha.x()), hex(alpha.y()))
randomBytes = os.urandom(32)
d = HashToBase(randomBytes, index)
print("DEVICE: I am going to store d: ", d)
return d * alpha # beta = alpha^d
#Client 2
def clientToPassword(beta: ecc.Point, rho: int) -> str:
'''input the point on the curve. If it is in the Group, we compute
this point exponeniated to the inverse of rho, and then we use the
OPRF to create the byte array which generates the final password rwd
'''
if curve_256.contains_point(beta.x(), beta.y()) != True:
return 0
final = beta * pow(rho, ORDER-2, ORDER)
print("FINAL: ", hex(final.x()), hex(final.y()))
rwdbytes = OPRF(x, final)
return gen_password(rwdbytes, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()")
# Usage
# --------- Start Client ---------
alpha, rho = clientToPoint(x)
# --------- End Client ---------
# send alpha to Device
# --------- Start Device ---------
beta = deviceToClient(alpha)
# --------- End Device ---------
# send beta to Client
# --------- Start Client ---------
rwd = clientToPassword(beta, rho)
print("CLIENT: my password is", rwd)
# --------- End Client ---------
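# (added note) deviceToClient() draws a fresh random d on every call, so the derived password is
# only reproducible if the device persists d (the paper's k) per user/site -- e.g. keyed by the
# index argument -- and reuses it on later logins, as described in the protocol notes above.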
###Output
ALPHAS: 0x6d68db7cb3dc8e41218e21c99686a1c4b15f4b28b042c20e9cf1a5ec4269f77a 0x6caf2ff7e2661f15eabc9b92dac5c99847ee9af38eade284e2fe4dcdc41246cf
DEVICE: I am going to store d: 39293753817579684587358563962791485292162063136866083282799165565461415876781
FINAL: 0x7e5b03b2c70b79dbf3ebee465c1af0dacf48abb1659a26251cf90c9324da7d43 0x7ef85457e3fa4289a7b49cd37d6ca2b285fd51ad68433a70598cad63033cb52b
CLIENT: my password is 5tce4XdQQHobplMPqhfUIwtR%RbOnlAw
|
SQL_Exercise.ipynb | ###Markdown
1. Setup
###Code
# common imports
import pandas as pd
import numpy as np
import os
###Output
_____no_output_____
###Markdown
Q.2 SQL Skills Assessment 2.1 Create a Table, Run a Simple Query "Create your own employee table in the following format: EMPLOYEE_ID, NAME, PROFESSION. How do you generate a new table with number of employees in each of the following categories of professions: IT, SALES and OTHER?"
###Code
n_empl = 2000
###Output
_____no_output_____
###Markdown
I create a table of employees using `pandas` and the `randint` method from `numpy.random`.
###Code
# check how it looks
np.random.seed(42)
# EMPLOYEE_ID varies from 1000000 to 10000000,
# NAME is generated randomly out of 2-digit integers (see below),
# PROFESSION is selected out of 10 (see a dictionary below)
np.random.randint(low=[100000, 1, 1], high=[1000000, 100, 11], size=(10, 3))
prof_dict = {1: "FREELANCER",
2: "RESEARCHER",
3: "ACCOUNTANT",
4: "DOCTOR",
5: "IT",
6: "SALES",
7: "MANAGER",
8: "SERVICE",
9: "ENGINEER",
10: "OTHER"}
np.random.seed(42)
my_df = pd.DataFrame(np.random.randint(low=[100000, 10, 1], high=[1000000, 100, 11], size=(n_empl, 3)),
columns = ["EMPLOYEE_ID", "NN", "PROF_ID"])
my_df
my_df["PROFESSION"] = my_df["PROF_ID"].map(prof_dict)
# make name be a repetition of characters:
# if an integer is YZ, then the first name is Y times a letter that corresponds to Y
# and the last name Z times a letter that corresponds to Z
my_df["NAME"] = [
chr(ord('@') + cval//10)*(cval//10)
+ " " + chr(ord('@') + cval%10)*(cval%10)
for cval in my_df["NN"].values
]
my_df
###Output
_____no_output_____
###Markdown
I had to set up the database manually using MS SQL Server 2019, so I exported the dataframe to a CSV file
###Code
my_df[["EMPLOYEE_ID", "NAME", "PROFESSION"]].to_csv("employees.csv")
###Output
_____no_output_____
###Markdown
I import this csv file into a local SQL database using "SQL Server 2019 Import and Export Tool". Then I run the following query:```mysqlSELECT PROFESSION, count(EMPLOYEE_ID) FROM employees -- -- AN ALTERNATIVE WOULD BE: -- where (PROFESSION like '%IT%' OR PROFESSION like '%OTHER%' OR PROFESSION like '%SALES%') where (PROFESSION = 'IT' OR PROFESSION = 'OTHER' OR PROFESSION = 'SALES') group by PROFESSION ``` "Please provide both your table and the query."Here you go:
###Code
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
img = mpimg.imread("screenshot1.png")
plt.figure(figsize=(16,9))
plt.imshow(img)
###Output
_____no_output_____
###Markdown
2.2 Construct a Table "You have the following definition and the transactional tables. How do you construct the output table with the columns “date, new_users, active_users, churn_users, reactivated_users” in SQL? In the definition table (user_status_table), “x” represents activity, “-“ represents no activity and “NA” represents when it’s not important."user_status | Activity before 60d | Activity 30-60d ago | Activity in last 30d------------|---------------------|---------------------|---------------------new | - | - | Xactive | NA | X | Xchurned | NA | X | -reactivated | X | - | X ProblemsThis formulation implies that the same users can change their status as time runs.There is no information about the time frequency, so I assumed it can be arbitrary.Moreover, it is not clear whether there can be duplicates in the `activity_table`.**Note:** Without loss of generality, I assume that the `activity_table` does not contain any duplicates. In case there are duplicates, we can first exclude those by creating a new `activity_table2` without any duplicates and use it instead.To create a (temp) copy of a table without date-user duplicates, run:```mysqlSELECT DISTINCT convert(date, date) as date_ymd, user_id INTO activity_table2 FROM activity_table```**WARNING:** The column names `user_id` and `date` are bad since they are reserved system names.I will write the queries using these names, however, in a real database you should avoid naming your columns like that. IdeaI do not know whether there exists a way to create a classifier and then simply count entries that are produced by this classifier.Nevertheless, I have the following idea.* For each date, I can calculate whether the user was active on that date. For each user, I can calculate the date when that user was active for the first/last time. - I can calculate how long has passed since the user was last active. This gives me information about churned users (if the time difference between the date and the last activity date is between 30 and 60 days). Let's denote the table with such users as $Churned$. - I can calculate how long ago the user became active (for the first time). This gives me info on new users (first active fewer than 30 days ago), denoted as $New$, as well as long-time active users (who can be still active, churned or reactivated), denoted as $LongActive$. I, therefore, can only calculate for each date the number of new users and the number of churned ones. I still need to distinguish between active and reactivated users. * For each date, I can calculate `pDate` = `date` - 30 days. - Use these dates to create a temporary table `temp_activity_table` (date, pDate, user_id[pDate]). * For users in this temp table I can now identify who were *not* active 30 days before pDate; denote those as $Gap30to60$. * These are candidates for reactivated users: $Reactivated = (LongActive \backslash Churned) \cap Gap30to60$. * The rest are active: $Active = (LongActive \backslash Churned) \backslash Reactivated$. **Note:** I will use the definitions given in the `user_status` table, i.e. calculate the difference between two dates in *days*, not in months. ScriptThe following script should do the job.
```mysql--------------------------------------------------------------------------------- Delete a table if it exists-- (just for convenience)IF OBJECT_ID('tempdb.dbo.activity2', 'U') IS NOT NULL DROP TABLE activity2 ;-- Create a table without day-user duplicatesSELECT DISTINCT convert(date, date) AS dat, user_id INTO activity2 FROM activity ;--------------------------------------------------------------------------------- Make a table with the lag(date) - for each date make an entry when the user was active last timeSELECT a.user_id, a.dat, lag(dat) OVER(PARTITION BY user_id ORDER BY dat) as prev_date FROM activity2 a ORDER BY user_id, dat ;-- For convenienceIF OBJECT_ID('dbo.GAP30p', 'U') IS NOT NULL DROP TABLE dbo.GAP30p ;-- Create a table "GAP30p" whith users that were last active more that 30 days ago compared to the date we observe them againSELECT b.user_id, b.dat, b.prev_date, datediff(d, b.prev_date, b.dat) as gap INTO GAP30p FROM ( SELECT user_id, dat, lag(dat) OVER(PARTITION BY user_id ORDER BY dat) as prev_date FROM activity2 ) b WHERE datediff(d, b.prev_date, b.dat) > 30 ORDER BY user_id, dat ;-- See the result--select * from GAP30p order by user_id, dat ;--------------------------------------------------------------------------------- CROSS-JOINT Table with all users on all datesIF OBJECT_ID('tempdb.dbo.ct', 'U') IS NOT NULL DROP TABLE ct ;SELECT * INTO ct FROM ( SELECT DISTINCT dat as dates FROM activity2 ) a CROSS JOIN ( SELECT DISTINCT user_id FROM activity2 ) b ;--------------------------------------------------------------------------------- For convenienceIF OBJECT_ID('dbo.Churned', 'U') IS NOT NULL DROP TABLE dbo.Churned ; -- Create a table ("Churned") that shows for each possible date all the users that were churned at that dateSELECT ct.dates, ct.user_id, g.dat AS appeared_again, g.prev_date as last_appear, g.gap as absence_period INTO Churned FROM ct RIGHT JOIN GAP30p g ON ct.user_id = g.user_id WHERE datediff(d, g.dat, ct.dates) 30 ORDER BY ct.user_id, ct.dates, g.dat ;-- See the result--select * from Churned ORDER BY dates, user_id ;--------------------------------------------------------------------------------- For convenienceIF OBJECT_ID('dbo.New', 'U') IS NOT NULL DROP TABLE dbo.New ;-- Create a table ("New") that shows for each possible date all the users that were first time active less than 30d agoSELECT ct.dates, ct.user_id, f.first_active, datediff(d, f.first_active, ct.dates) AS activity_length INTO New FROM ct RIGHT JOIN ( SELECT DISTINCT user_id, min(dat) OVER(PARTITION BY user_id ORDER BY dat) as first_active FROM activity2 ) f ON ct.user_id = f.user_id WHERE datediff(d, f.first_active, ct.dates) = 0 ORDER BY ct.user_id, ct.dates ;-- See the result--select * from New ORDER BY dates, user_id ;--------------------------------------------------------------------------------- first time active more than 30d agoIF OBJECT_ID('dbo.LongActive', 'U') IS NOT NULL DROP TABLE dbo.LongActive ;SELECT ct.dates, ct.user_id, f.first_active, datediff(d, f.first_active, ct.dates) AS activity_length INTO LongActive FROM ct RIGHT JOIN ( SELECT DISTINCT user_id, min(dat) OVER(PARTITION BY user_id ORDER BY dat) as first_active FROM activity2 ) f ON ct.user_id = f.user_id WHERE datediff(d, f.first_active, ct.dates) >= 30 ORDER BY ct.user_id, ct.dates ;-- See the result--SELECT * FROM LongActive ORDER BY user_id, dates ;-- just a table with lagged datesIF OBJECT_ID('tempdb.dbo.lagdates', 'U') IS NOT NULL DROP TABLE lagdates ;SELECT ct.dates, CONVERT(date, CONVERT(datetime, 
ct.dates) - 30) AS pDate, ct.user_id INTO lagdates FROM ct WHERE CONVERT(date, CONVERT(datetime, ct.dates) - 30) >= '2000-01-01' ORDER BY dates, pDate, user_id ;-- See the result--SELECT * FROM lagdates ORDER BY dates, user_id ;--------------------------------------------------------------------------------- from a 30d ago point of view, these users were absent for 30 daysIF OBJECT_ID('dbo.GAP30to60', 'U') IS NOT NULL DROP TABLE dbo.GAP30to60 ;SELECT lagdates.dates, lagdates.pDate, lagdates.user_id, g.dat AS appeared_again, g.prev_date as last_appear, g.gap as absence_period INTO GAP30to60 FROM lagdates RIGHT JOIN GAP30p g ON lagdates.user_id = g.user_id WHERE datediff(d, g.dat, lagdates.pDate) 30 ORDER BY lagdates.user_id, lagdates.dates, g.dat ;-- Show the result--SELECT * FROM GAP30to60 ORDER BY user_id, dates, pDate, appeared_again ;--------------------------------------------------------------------------------- Reactivated users are those who are not in Churned but are in GAP30to60IF OBJECT_ID('dbo.Reactivated', 'U') IS NOT NULL DROP TABLE dbo.Reactivated ;SELECT la_no_c.*, g.pDate, g.appeared_again, g.last_appear, g.absence_period INTO Reactivated FROM ( SELECT la.* FROM LongActive la LEFT JOIN Churned c ON la.dates = c.dates and la.user_id = c.user_id WHERE c.user_id IS NULL AND c.dates IS NULL ) la_no_c INNER JOIN GAP30to60 g ON la_no_c.dates = g.dates AND la_no_c.user_id = g.user_id ORDER BY la_no_c.dates, la_no_c.user_id ;-- Show the result--select * from Reactivated order by dates, user_id ;--------------------------------------------------------------------------------- Active users are those who are not in Churned AND not ReactivatedIF OBJECT_ID('dbo.Active', 'U') IS NOT NULL DROP TABLE dbo.Active ;SELECT la_no_c.* INTO Active FROM ( SELECT la.* FROM LongActive la LEFT JOIN Churned c ON la.dates = c.dates and la.user_id = c.user_id WHERE c.user_id IS NULL AND c.dates IS NULL ) la_no_c LEFT JOIN Reactivated r ON la_no_c.dates = r.dates AND la_no_c.user_id = r.user_id WHERE r.user_id IS NULL AND r.dates IS NULL ORDER BY la_no_c.dates, la_no_c.user_id ;-- Show the result--select * from Active order by dates, user_id ;----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- FINAL TABLE WITH:-- * date, new_users, active_users, churn_users, reactivated_usersSELECT ct.dates, count(n.user_id) as 'new_users', count(a.user_id) as 'active_users', count(c.user_id) as 'churn_users', count(r.user_id) as 'reactivated_users' FROM ct FULL OUTER JOIN New n ON ct.dates = n.dates AND ct.user_id = n.user_id FULL OUTER JOIN Active a ON ct.dates = a.dates AND ct.user_id = a.user_id FULL OUTER JOIN Churned c ON ct.dates = c.dates AND ct.user_id = c.user_id FULL OUTER JOIN Reactivated r ON ct.dates = r.dates AND ct.user_id = r.user_id GROUP BY ct.dates ORDER BY ct.dates``` I tested the script on the data, generated as follows.
###Code
import numpy as np
import pandas as pd
import datetime as dt
# id, Year, Month, Day
np.random.seed(42)
a = np.random.randint(low=[1, 2000, 1, 1], high=[100, 2001, 13, 29], size=(1000, 4))
d = [dt.datetime(row[0], row[1], row[2]) for row in a[:, 1:]]
df = pd.DataFrame(np.c_[a[:, 0], d])
df.columns = ['user_id', 'date']
df.sort_values(by='date', axis=0).to_csv("activity.csv")
###Output
_____no_output_____
###Markdown
Again, I imported the CSV file into MS SQL Server 2019 using the Import/Export tool. After running the SQL query presented above (it is also saved in the `Q2.2.sql` file), I saved the output into `result.csv`.Here I print its content (partially, as pandas crops the middle rows by default).
###Code
import pandas as pd
result = pd.read_csv('result.csv', delimiter=';', header=None, names=[
"date", '#new_users', '#active_users', '#churn_users', '#reactivated_users'])
result
###Output
_____no_output_____ |
2019900004_Linear_Prog.ipynb_checkpoints/OM_Assignment_1-checkpoint.ipynb | ###Markdown
Assignment-1 Linear Programming The objective of this assignment is to show the applications of linear programming in real life problems. You will be asked to solve problems from classical physics to puzzles. Instructions - For each question you need to write the formulation in markdown and solve the problem using `cvxpy`. - Ensure that this notebook runs without errors when the cells are run in sequence. - Plagarism will not be tolerated. - Use only `python3` to run your code. - If you are facing issues running the notebook on your local system. Use google collab to run the notebook online. To run the notebook online, go to [google collab](!https://colab.research.google.com/notebooks/intro.ipynb). Go to `File -> Upload Notebook` and import the notebook file Submission - Rename the notebook to `.ipynb` and submit **ONLY** the notebook file on moodle. Problems 1. Sudoku 2. Best Polyhedron 3. Largest Ball4. Illumination Problem5. Jigsaw Puzzle
###Code
# Installation dependencies
!pip3 install numpy==1.18.1 matplotlib==3.1.3 scipy==1.4.1 sklearn
!pip3 install cvxpy==1.0.25 scikit-image==0.16.2
# Compatibility imports
from __future__ import print_function, division
# Imports
import os
import sys
import random
import numpy as np
import cvxpy as cp
import matplotlib.pyplot as plt
# Modules specific to problems
from sklearn.datasets import make_circles # For problem 2 (Best Polyhedron)
from scipy.spatial import ConvexHull # For problem 3 (Largest Ball in Polyhedron)
from scipy.linalg import null_space # For problem 4 (Illumination)
import matplotlib.cbook as cbook # For problem 5 (Jigsaw)
from skimage.transform import resize # For problem 5 (Jigsaw)
%matplotlib inline
###Output
_____no_output_____
###Markdown
Question-1 Sudoku - In this problem you will develop a mixed integer programming algorithm, based upon branch and bound, to solve Sudoku puzzles as described in class.- In particular, you need to implement the class SudokuSolver The function takes as input a Sudoku puzzle as a 9x9 “list of lists” of integers, i.e., puzzle = [[4, 8, 0, 3, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 7, 1], [0, 2, 0, 0, 0, 0, 0, 0, 0], [7, 0, 5, 0, 0, 0, 0, 6, 0], [0, 0, 0, 2, 0, 0, 8, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 7, 6, 0, 0, 0], [3, 0, 0, 0, 0, 0, 4, 0, 0], [0, 0, 0, 0, 5, 0, 0, 0, 0]]where zeros represent missing entries that must be assigned by your algorithm, and all other integers represent a known assignment. - The class SudokuSolver inherits the Sudoku class. You need to make changes **only** to the SudokuSolver class. Write function `plot` to plot the unsolved and solved puzzle. Write function `solve` to create our own solver, the function can get the unsolved puzzle as the input as should return a 9x9 numpy array (solved puzzle), where solved puzzle contains the input puzzle with all the zeros assigned to their correct values. For instance, for the above puzzlethis would be solved_puzzle = [[4, 8, 7, 3, 1, 2, 6, 9, 5], [5, 9, 3, 6, 8, 4, 2, 7, 1], [1, 2, 6, 5, 9, 7, 3, 8, 4], [7, 3, 5, 8, 4, 9, 1, 6, 2], [9, 1, 4, 2, 6, 5, 8, 3, 7], [2, 6, 8, 7, 3, 1, 5, 4, 9], [8, 5, 1, 4, 7, 6, 9, 2, 3], [3, 7, 9, 1, 2, 8, 4, 5, 6], [6, 4, 2, 9, 5, 3, 7, 1, 8]] - You should write code to solve this problem using cvxpy.**Write the code in SudokuSolver class only**.
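For orientation, here is one way the LP relaxation at the heart of such a branch-and-bound solver could look (an illustrative sketch with hypothetical names, not the required solution): x[cell, k] is a relaxed 0/1 indicator that the cell holds digit k+1, and every cell, row, column, 3x3 box and clue contributes an equality constraint.

```python
import cvxpy as cp

def sudoku_lp_relaxation(puzzle):
    # relaxed indicators: x[idx(i, j), k] ~ "cell (i, j) holds digit k + 1"
    x = cp.Variable((81, 9))
    idx = lambda i, j: 9 * i + j
    cons = [x >= 0, x <= 1]
    for i in range(9):
        for j in range(9):
            cons.append(cp.sum(x[idx(i, j), :]) == 1)                 # one digit per cell
            v = int(puzzle[i][j])
            if v != 0:
                cons.append(x[idx(i, j), v - 1] == 1)                 # respect the given clues
    for k in range(9):
        for i in range(9):
            cons.append(sum(x[idx(i, j), k] for j in range(9)) == 1)  # digit k once per row
            cons.append(sum(x[idx(j, i), k] for j in range(9)) == 1)  # digit k once per column
        for bi in range(3):
            for bj in range(3):
                cons.append(sum(x[idx(3 * bi + di, 3 * bj + dj), k]
                                for di in range(3) for dj in range(3)) == 1)  # once per 3x3 box
    cp.Problem(cp.Minimize(0), cons).solve()
    return x.value  # fractional entries are the candidates to branch on
```

A branch-and-bound loop would pick a fractional entry, solve one subproblem with it fixed to 0 and one with it fixed to 1, and recurse until an all-integer assignment is found.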
###Code
# Class Sudoku will generate new sudoku problems for you to solve. You cannot change this code. Complete the formulation and the solver below
class Sudoku():
def __init__(self):
super(Sudoku,self).__init__()
self.puzzle = None # Unsolved sudoku
self.solution = None # Store the solution here
pass
def construct_solution(self):
"""
This function created a 9x9 solved sudoku example.
It can be used as a reference to see the performance of your solver.
"""
while True: # until a solved sudoku puzzle if created
puzzle = np.zeros((9,9))
rows = [set(range(1,10)) for i in range(9)] # set of available
columns = [set(range(1,10)) for i in range(9)] # numbers for each
squares = [set(range(1,10)) for i in range(9)] # row, column and square
try:
for i in range(9): # for each roe
for j in range(9): # for each column
# Randomly choose a possible number for the location
choices = rows[i].intersection(columns[j]).intersection(squares[(i//3)*3 + j//3])
choice = random.choice(list(choices))
puzzle[i,j] = choice # update the puzzle
# Remove from the choice from row,column, square
rows[i].discard(choice)
columns[j].discard(choice)
squares[(i//3)*3 + j//3].discard(choice)
# success! every cell is filled.
return puzzle
except IndexError:
# if there is an IndexError, we have worked ourselves in a corner (we just start over)
continue
def construct_problem(self,solution,n=28):
"""
Construct the puzzle by removing a cell if it is possible to deduce a cell's value from the remaining cells
@param: n => minimum number of unplucked/remaining cells
"""
def canBeDeduced(puz, i, j, c): # check if the cell can be deduced from the remaining cells
v = puz[c//9,c%9]
if puz[i,j] == v: return True
if puz[i,j] in range(1,10): return False
for m in range(9): # test row, col, square
# if not the cell itself, and the mth cell of the group contains the value v, then "no"
if not (m==c//9 and j==c%9) and puz[m,j] == v: return False
if not (i==c//9 and m==c%9) and puz[i,m] == v: return False
if not ((i//3)*3 + m//3==c//9 and (j//3)*3 + m%3==c%9) and puz[(i//3)*3 + m//3,(j//3)*3 + m%3] == v:
return False
return True
cells = set(range(81))
cellsLeft = set(range(81))
while len(cells) > n and len(cellsLeft): # Cells in the problem > n and cells left to be plucked > 0
cell = random.choice(list(cellsLeft)) # choose a random cell
cellsLeft.discard(cell)
# record whether another cell in these groups could also take
# on the value we are trying to pluck
row = col = square = False
for i in range(9): # For all numbers
if i != cell//9: # can be deduced from the row
if canBeDeduced(solution, i, cell%9, cell): row = True
if i != cell%9: # can be deduced from the col
if canBeDeduced(solution, cell//9, i, cell): col = True
if not (((cell//9)//3)*3 + i//3 == cell//9 and ((cell//9)%3)*3 + i%3 == cell%9): # can be deduced from the square
if canBeDeduced(solution, ((cell//9)//3)*3 + i//3, ((cell//9)%3)*3 + i%3, cell): square = True
if row and col and square:
continue # could not pluck this cell, try again.
else:
# this is a pluckable cell!
solution[cell//9][cell%9] = 0 # 0 denotes a blank cell
cells.discard(cell) # remove from the set of visible cells (pluck it)
# we don't need to reset "cellsleft" because if a cell was not pluckable
# earlier, then it will still not be pluckable now (with less information
# on the board).
return solution
###Output
_____no_output_____
###Markdown
**Write the formulation of your solution here**
###Code
# Create your sudoku puzzle solver here
class SudokuSolver(Sudoku):
def __init__(self):
super(SudokuSolver,self).__init__()
self.solution = self.construct_solution() # Store the solution here
self.puzzle = self.construct_problem(self.solution.copy(),n=28) # Unsolved sudoku
def plot(self):
print("Puzzle")
print(self.puzzle)
"""
Write code here for plotting your solution
"""
print("Original Solution")
print(self.solution)
def solve(self):
"""
Write your code here.
The function should return the solved sudoku puzzle
"""
return
solver = SudokuSolver()
solver.solve()
solver.plot()
###Output
_____no_output_____
###Markdown
Question-2 Polyhedron Explain how you would solve the following problem using linear programming. Youare given two sets of points in Rn:$$S1 = \{ x_1, . . . , x_N \}, \space S2 = \{y_1, . . . , y_M\}.$$You are asked to find a polyhedron$$P = \{x | \space a_i^T x ≤ b_i, i = 1, . . . , m\}$$that contains the points in S1 in its interior, and does not contain any of the points in S2:$$S1 ⊆ \{x | \space a_i^T x < b_i, i = 1, . . . , m\}$$$$ S2 ⊆ \{x |\space a_i^T x > b_i \space for \space \space at \space \space least\space\space one \space i \} = R_n - P.$$An example is shown in the figure, with the points in S1 shown as open circles and the points in S2 as filled circles. You can assume that the two sets are separable in the way described. - Your solution method should return a_i and b_i, i = 1, . . . , m, given the sets S1 and S2. The number of inequalitiesm is not specified, but it should not exceed 20, i.e your polyhedron should not have more than 20 faces. - You are allowed to solve one or moreLPs or LP feasibility problems. The method should be efficient, i.e., the dimensions of theLPs you solve should not be exponential as a function of N and M.- You can calculate the quality of your solution by dividing the number of points in S1 your polyhedron is leaving out (points lying outside the polyhedron) by the total number of points in the set S1 (= N). The lower the value, the more efficient your solution will be. Use this metric to choose the most efficient solution out of all the possible solutions.- The class PolyhedronSolver inherits the Polyhedron class. You need to make changes **only** to the PolyhedronSolver class. Write function `plot` to plot the points and the polyhedron (Look at question-3 on how to plot a polyhedron). Write function `solve` to create our own solver, the function can get the S1 & S2 as the input as should return a numpy array of size Dx2, where the D is the number the vertices of the polyhedron.
###Code
class Polyhedron():
def __init__(self):
super(Polyhedron,self).__init__()
data, labels = make_circles(n_samples=1000, noise=0.15,factor=0.3) # This will create our data
self.S1 = data[labels==0] # Points outside the polyhedron
self.S2 = data[labels==1] # Points intside the polyhedron
###Output
_____no_output_____
###Markdown
**Write the formulation of your solution here**
###Code
class PolyhedronSolver(Polyhedron):
def __init__(self):
super(PolyhedronSolver,self).__init__()
pass
def plot(self):
fig = plt.figure(figsize=(8,8)) # Create 8x8 inches figure
ax = fig.add_subplot(111) # Create a graph inside the figure
ax.scatter(self.S1[:,0],self.S1[:,1],c="red",label="outside polyhedron") # Plot S1
ax.scatter(self.S2[:,0],self.S2[:,1],c="orange",label="inside polyhedron") # PlotS2
"""
Write code here for plotting your polyhedron
"""
ax.set_title("Polyhedron Dividing the data")
plt.legend()
plt.show()
def solve(self):
"""
Write your code here.
"""
return
solver = PolyhedronSolver()
solver.plot()
solver.solve()
###Output
_____no_output_____
###Markdown
Question-3 Largest Ball in a polyhedron Find the largest ball $$ B(x_c, R) = \{ x : ||x − x_c|| ≤ R \}$$enclosed in a given polyhedron$$ P = \{ x | a_i^T x ≤ b_i, i = 1, . . . , m\} $$- The problem variables are the center xc ∈ Rn and the radius R of the ball.- The class CircleSolver inherits the CircleInPolygon class. You need to make changes only to the CircleSolver class. Write function `plot` to plot the polyhedron and the circle. Write function `solve` to create our own solver; the function gets the polyhedron as the input and should return a tuple (center, radius), where center is a 1x2 numpy array containing the center of the circle and radius is a scalar value containing the largest possible radius.
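For reference, the classical Chebyshev-centre formulation drops straight into cvxpy; the sketch below assumes the polyhedron has already been converted from the vertex list stored by the class into halfspace form $Ax \le b$ (that conversion is not shown here):

```python
import numpy as np
import cvxpy as cp

def chebyshev_center(A, b):
    # largest ball {x : ||x - xc||_2 <= R} inside {x : A x <= b}:
    # maximise R subject to a_i^T xc + R * ||a_i||_2 <= b_i for every face i
    xc = cp.Variable(A.shape[1])
    R = cp.Variable(nonneg=True)
    row_norms = np.linalg.norm(A, axis=1)
    prob = cp.Problem(cp.Maximize(R), [A @ xc + R * row_norms <= b])
    prob.solve()
    return xc.value, R.value
```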
###Code
class CircleInPolygon():
def __init__(self):
super(CircleInPolygon,self).__init__()
self.polygon = np.random.random((10,2))
self.polygon = self.polygon[ConvexHull(self.polygon).vertices,:] # A polygon is stored here
###Output
_____no_output_____
###Markdown
**Write the formulation of problem here**
###Code
# Create your circle puzzle solver here
class CircleSolver(CircleInPolygon):
def __init__(self):
super(CircleSolver,self).__init__()
def plot(self):
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.plot(self.polygon[:,0],self.polygon[:,1],linewidth=3,c="black") # Plot the points
ax.plot([self.polygon[0,0],self.polygon[-1,0]],[self.polygon[0,1],self.polygon[-1,1]],linewidth=3,c="black") # Plot the edges
ax.scatter(self.polygon[:,0],self.polygon[:,1],s=100,c="red",label="Polygon") # Plot the edge connecting last and the first point
"""
Add code to plot the circle
"""
ax.set_title("Largest Circle inside a polyhedron")
plt.legend()
plt.show()
def solve(self):
pass
solver = CircleSolver()
solver.solve()
solver.plot()
###Output
_____no_output_____
###Markdown
Question-4 Illumination Problem We consider an illumination system of m lamps, at positions l1, . . . , lm ∈ R2, illuminating n flat patches. The patches are line segments; the ith patch is given by $$[v_i, v_{i+1}]$$ where v1, . . . , vn+1 ∈ R2. The variables in the problem are the lamp powers p1, . . . , pm, which can vary between 0 and 1.The illumination at (the midpoint of) patch i is denoted $I_i$. We will use a simple model for the illumination: $$I_i = \sum_{j=1}^m a_{ij}p_{j}$$ $$a_{ij} = r_{ij}^{-2}\max(\cos\theta_{ij}, 0)$$where $r_{ij}$ denotes the distance between lamp j and the midpoint of patch i, and $\theta_{ij}$ denotes the angle between the upward normal of patch i and the vector from the midpoint of patch i to lamp j. This model takes into account “self-shading” (i.e., the fact that a patch is illuminated only by lamps in the halfspace it faces) but not shading of one patch caused by another. Of course we could use a more complex illumination model, including shading and even reflections. This just changes the matrix relating the lamp powers to the patch illumination levels.The problem is to determine lamp powers that make the illumination levels close to a given desired illumination level $I_{des}$, subject to the power limits 0 ≤ pi ≤ 1. Suppose we use the maximum deviation $$\varphi(p) = \max_{k=1,\dots,n} |I_{k} - I_{des}|$$as a measure for the deviation from the desired illumination level. Formulate the illumination problem using this criterion as a linear programming problem.Create the data using the $Illumination$ class and solve the problem using IlluminationSolver class. The elements of A are the coefficients $a_{ij}$ in the above equation. Compute a feasible p using this first method, and calculate $\varphi(p)$
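The epigraph trick turns the minimax objective into an LP: introduce a scalar $t$ and minimise it subject to $-t \le Ap - I_{des} \le t$ and $0 \le p \le 1$. A minimal cvxpy sketch (assuming the matrix A and the desired level are given):

```python
import cvxpy as cp

def min_max_deviation(A, I_des):
    # minimise t subject to |(A p)_k - I_des| <= t for every patch k, 0 <= p <= 1
    p = cp.Variable(A.shape[1])
    t = cp.Variable()
    cons = [A @ p - I_des <= t, I_des - A @ p <= t, p >= 0, p <= 1]
    prob = cp.Problem(cp.Minimize(t), cons)
    prob.solve()
    return p.value, prob.value
```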
###Code
class Illumination():
def __init__(self):
super(Illumination,self).__init__()
# Lamp position
self.Lamps = np.array([[0.1 ,0.3, 0.4, 0.6 ,0.8 ,0.9 ,0.95],[1.0, 1.1, 0.6 ,0.9, 0.9 ,1.2, 1.00]])
self.m = self.Lamps.shape[1] # number of lamps
# begin and endpoints of patches
self.patches = [np.arange(0,1,1/12),np.array([0 ,0.1 ,0.2, 0.2, 0.1, 0.2 ,0.3 ,0.2 ,0 , 0 , 0.2, 0.1])]
self.patches = np.array(self.patches)
self.n = self.patches.shape[1] -1 # number of patches
# desired illumination
Ides = 2;
# construct A
self.dpatches = self.patches[:,1:] - self.patches[:,:-1]; # tangent to patches
self.patches_mid = self.patches[:,1:] - 0.5*self.dpatches; # midpoint of patches
A = np.zeros((self.n,self.m));
for i in range(self.n):
for j in range(self.m):
dVI = self.Lamps[:,j]-self.patches_mid[:,i] # Find the distance between each lamp and patch
rij = np.linalg.norm(dVI,ord=2) # Find the radius/distance between lamp and the midpoint of the patch
normal = null_space(self.dpatches[:,i].reshape(1,2)) # Find the normal
if normal[1] < 0: # we want an upward pointing normal
normal = -1*normal
A[i,j] = dVI.dot(normal)/(np.linalg.norm(dVI,ord=2)*np.linalg.norm(normal,ord=2))/(rij**2); # Find A[i,j] as defined above
if A[i,j] < 0:
A[i,j] = 0
self.A = A
###Output
_____no_output_____
###Markdown
**Write the formulation of problem here**
###Code
# Create your illumination solver here
class IlluminationSolver(Illumination):
def __init__(self):
super(IlluminationSolver,self).__init__()
def plot(self):
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(111)
ax.scatter(self.Lamps[0,:],self.Lamps[1,:],s=100,c="red",label="Lamps") # Lamps
ax.scatter(self.patches_mid[0,:],self.patches_mid[1,:],s=50,c="blue",label="Patch Mid-point") # Lamps
ax.plot(self.patches[0,:],self.patches[1,:],linewidth=3,c="black",label="Patches") # Patches
# Normal joining lamps and patchs
for i in range(self.n):
for j in range(self.m):
if self.A[i,j] > 0:
ax.plot([self.Lamps[0,j], self.patches_mid[0,i]],[self.Lamps[1,j], self.patches_mid[1,i]],'r--',linewidth=0.1,alpha=1)
ax.text((self.Lamps[0,j]+self.patches_mid[0,i])/2,(self.Lamps[1,j] + self.patches_mid[1,i])/2,"A={0:.2f}".format(self.A[i,j]),alpha=0.5)
plt.legend()
plt.show()
def solve(self):
"""
Write your solver here
"""
pass
solver = IlluminationSolver()
solver.plot()
###Output
_____no_output_____
###Markdown
Question-5 Jigsaw Solving jigsaw puzzles computationally remains a relevant and intriguing problem noted for its applications to real-world problems. In this problem, you'll implement a solver that solves jigsaw puzzles using linear programming. The current problem requires you to only solve Type I problems (i.e. problems where the orientation of the pieces is known).You may refer to this paper [Solving Jigsaw Puzzles with Linear Programming](https://arxiv.org/abs/1511.04472) for implementation details. - Jigsaw class will take an image as input and will create a random jigsaw puzzle. Function patches2image will reconstruct the image back from the patches- The class JigsawSolver inherits the Jigsaw class. You need to make changes only to the JigsawSolver class. Write function `solve` to create our own solver, the function can get a numpy array of shape RxCxHxWx3 as input, where R & C are the number of rows and cols in the jigsaw puzzle respectively and HxW is the height and width of each peice. The output of the function should be a numpy array of shape RxCxHxWx3 which would be the solution.
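LP-based jigsaw solvers, including the referenced paper, are built on pairwise matching costs between piece boundaries. A minimal sketch of such a cost (squared colour difference across the touching columns, for the "piece j sits to the right of piece i" relation) on the RxCxHxWx3 patch array described above:

```python
import numpy as np

def right_left_dissimilarity(patches):
    # cost[i, j]: how badly piece j fits immediately to the right of piece i
    P = patches.reshape(-1, *patches.shape[2:])          # (R*C, H, W, 3)
    n = P.shape[0]
    cost = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            cost[i, j] = np.sum((P[i, :, -1, :] - P[j, :, 0, :]) ** 2)
    return cost
```

The LP in the paper then chooses pairwise placements that minimise the total cost subject to consistency constraints; that part is not sketched here.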
###Code
class Jigsaw():
def __init__(self,path=None):
super(Jigsaw,self).__init__()
"""
Initialize your Jigsaw puzzle, you can provide path to an image or we will load the default image
"""
if path is None: # Load default image
with cbook.get_sample_data('grace_hopper.png') as image_file:
self.image = plt.imread(image_file)
else:
self.image = plt.imread(path) # Load image of your choice
self.original_size = self.image.shape[0:2] #Store the original size of the image
self.new_size = (500,500) # Store new image size, 500x500
self.rows = 5 # number of rows in jigsaw puzzle
self.cols = 5 # number of cols in jigsaw puzzle
# RxCxHxWx3 numpy array storing the jigsaw puzzle
self.jigsaw = self.create_jigsaw_puzzle(self.image,self.new_size,self.rows,self.cols)
def image2patches(self,image,new_size,rows,cols):
"""
This function converts an image to patches for jigsaw puzzle
@param: image -> input image
@param: new_size -> shape to which image will be resized
@param: rows -> number of rows in the jigsaw puzzle
    @param: cols -> number of cols in the jigsaw puzzle
@returns: patches -> RxCxHxWx3 numpy array storing the jigsaw puzzle's patches
"""
image = resize(image,self.new_size) # resize the original image
patches = np.zeros((self.rows,self.cols,self.new_size[0]//self.rows,self.new_size[1]//self.cols,3)) # store the jigsaw puzzle here
for i in range(self.rows): # for every row of jigsaw
for j in range(self.cols): # for every col of jigsaw
                patches[i,j,:,:,:] = image[i*self.new_size[0]//self.rows:(i+1)*self.new_size[0]//self.rows,j*self.new_size[1]//self.cols:(j+1)*self.new_size[1]//self.cols,:] # slice the resized image, using new_size[1] along the column axis
return patches
def patches2image(self,patches,original_size):
"""
This function recreates images from patches
@param: patches -> RxCxHxWx3 numpy array storing the jigsaw puzzle's patches
    @param: original_size -> shape to which the output image will be resized
@returns: image -> output image
"""
R,C,H,W,_ = patches.shape
image = np.zeros((R*H,C*W,3))
for i in range(R):
for j in range(C):
image[i*H:(i+1)*H,j*W:(j+1)*W,:] = patches[i,j,:,:,:]
image = resize(image,original_size)
return image
def create_jigsaw_puzzle(self,image,new_size,rows,cols):
patches = self.image2patches(image,new_size,rows,cols)
R,C,H,W,_ = patches.shape
patches = patches.reshape((-1,H,W,3))
patches = patches[np.random.permutation(R*C),...].reshape((R,C,H,W,3))
return patches
###Output
_____no_output_____
###Markdown
**Write the formulation of the problem here**
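A possible direction (a rough sketch only — treat the referenced paper as the authoritative source; its details may differ): compute a pairwise compatibility score for every ordered pair of pieces and each adjacency relation (left/right/top/bottom), keep the strongest matches, and then solve for real-valued piece coordinates $(x_i, y_i)$ that minimize a weighted sum of $\ell_1$ deviations $|x_i - x_j - \delta^x_{ij}| + |y_i - y_j - \delta^y_{ij}|$ over the selected matches, where $(\delta^x_{ij}, \delta^y_{ij})$ is the unit offset implied by the relation. Each absolute value is linearized with an auxiliary variable, which yields a linear program; rounding the solved coordinates onto the RxC grid gives the piece placement.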
###Code
# Create your jigsaw puzzle solver here
class JigsawSolver(Jigsaw):
def __init__(self,*args,**kwargs):
super(JigsawSolver,self).__init__()
pass
def plot(self):
fig = plt.figure(figsize=(24,8))
ax1 = fig.add_subplot(131)
ax1.imshow(self.patches2image(self.jigsaw,self.original_size))
ax1.axis('off')
ax1.set_title("Input Jigsaw")
# Create your plot here
# ax2 = fig.add_subplot(132)
# ax2.imshow(self.image)
# ax2.axis('off')
# ax2.set_title("Reconstructed Image")
ax3 = fig.add_subplot(133)
ax3.imshow(self.image)
ax3.axis('off')
ax3.set_title("Original Image")
plt.show()
def solve(self):
"""
Write your solver here
"""
pass
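# A possible building block (illustrative, not a full solver): dissimilarity between the
# right edge of patch_a and the left edge of patch_b; lower values mean the two pieces are
# more likely to be horizontal neighbours. The other three adjacency directions follow the
# same pattern with different edge slices.
def edge_dissimilarity_lr(patch_a, patch_b):
    right_edge = patch_a[:, -1, :].astype(float)
    left_edge = patch_b[:, 0, :].astype(float)
    return float(np.sum((right_edge - left_edge) ** 2))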
solver = JigsawSolver(path=None)
solver.solve()
solver.plot()
###Output
_____no_output_____ |
d210127_cr_calculators/predicting_noise.ipynb | ###Markdown
Predicting charge fluctuations due to NSB
###Code
nsb_list = np.geomspace(10, 1000, 10)
opct_list = [0.1, 0.5, 0.9]
d_list = []
for nsb in tqdm(nsb_list):
for opct in opct_list:
pulse = GenericPulse(time, value, mv_per_pe=None)
spectrum = SiPMPrompt(opct=opct, normalise_charge=True)
coupling = ACOffsetCoupling(nsb_rate=nsb, pulse_area=pulse.area, spectrum_average=spectrum.average)
camera = Camera(
continuous_readout_duration=128,
n_waveform_samples=128,
mapping=SSTCameraMapping(n_pixels=1),
photoelectron_pulse=pulse,
photoelectron_spectrum=spectrum,
coupling=coupling,
)
source = PhotoelectronSource(camera=camera)
acquisition = EventAcquisition(camera=camera)
for iev in range(n_events):
pe = source.get_nsb(nsb)
charge = pe.get_time_slice(window_start, window_end).get_charge_per_pixel(1)[0] - coupling.offset * window_width
d_list.append(dict(
nsb=nsb,
opct=opct,
enf=spectrum.excess_noise_factor,
iev=iev,
charge=charge,
))
df = pd.DataFrame(d_list)
def prediction_nsb(nsb, enf):
return np.sqrt(nsb * 1e6 * window_width * 1e-9 * enf)
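# The prediction above assumes Poisson-distributed NSB photoelectrons: the expected count in
# the integration window is rate (MHz -> Hz) times window width (ns -> s), and the excess
# noise factor scales the variance, giving a charge stddev of sqrt(N * ENF) in p.e. units.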
fig, ax = plt.subplots(figsize=(20, 10))
for enf, group in df.groupby('enf'):
color = ax._get_lines.get_next_color()
gb_nsb = group.groupby('nsb').std()
nsb = gb_nsb.index.values
std = gb_nsb['charge'].values
yerr = std / np.sqrt(2 * n_events - 1)
ax.errorbar(nsb, std, yerr=yerr, fmt='.', color=color, label=enf)
std_predict = prediction_nsb(nsb, enf)
ax.plot(nsb, std_predict, '--', color=color)
ax.set_xlabel("NSB Rate (MHz)")
ax.set_ylabel("StdDev")
ax.set_xscale("log")
ax.legend(title="ENF")
###Output
_____no_output_____
###Markdown
Predicting charge fluctuations due to electronic/digitisation noise
###Code
noise_stddev_list = np.linspace(0, 10, 10)
d_list = []
for wf_stddev in tqdm(noise_stddev_list):
noise = GaussianNoise(stddev=wf_stddev)
camera = Camera(
continuous_readout_duration=128,
n_waveform_samples=128,
mapping=SSTCameraMapping(n_pixels=1),
digitisation_noise=noise,
)
source = PhotoelectronSource(camera=camera)
acquisition = EventAcquisition(camera=camera)
for iev in range(n_events):
pe = Photoelectrons.empty()
readout = acquisition.get_continuous_readout(pe)
waveform = acquisition.get_sampled_waveform(readout)
charge = waveform[0, window_start:window_end].sum()
d_list.append(dict(
wf_stddev=wf_stddev,
iev=iev,
charge=charge,
))
df = pd.DataFrame(d_list)
def prediction_noise(stddev):
return stddev * np.sqrt(window_width)
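# Summing window_width independent Gaussian noise samples, each with the given per-sample
# stddev, gives a total charge stddev of stddev * sqrt(window_width).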
fig, ax = plt.subplots(figsize=(20, 10))
gb_wf_stddev = df.groupby('wf_stddev').std()
wf_stddev = gb_wf_stddev.index.values
std = gb_wf_stddev['charge'].values
yerr = std / np.sqrt(2 * n_events - 1)
color = "C0"  # fixed color; previously this relied on a variable left over from the cell above
ax.errorbar(wf_stddev, std, yerr=yerr, fmt='.', color=color)
std_predict = prediction_noise(wf_stddev)
ax.plot(wf_stddev, std_predict, '--', color=color)
ax.set_xlabel("Digitisation Noise StdDev")
ax.set_ylabel("Charge StdDev")
###Output
_____no_output_____
###Markdown
Predicting total charge fluctuations due to noise
###Code
noise_stddev_list = [0.1, 0.5]
nsb_list = np.geomspace(10, 1000, 10)
opct_list = [0.1, 0.5, 0.9]
d_list = []
for nsb in tqdm(nsb_list):
for opct in opct_list:
for wf_stddev in noise_stddev_list:
noise = GaussianNoise(stddev=wf_stddev)
spectrum = SiPMPrompt(opct=opct, normalise_charge=True)
coupling = ACOffsetCoupling(nsb_rate=nsb, pulse_area=pulse.area, spectrum_average=spectrum.average)
camera = Camera(
continuous_readout_duration=128,
n_waveform_samples=128,
mapping=SSTCameraMapping(n_pixels=1),
photoelectron_pulse=pulse,
photoelectron_spectrum=spectrum,
coupling=coupling,
digitisation_noise=noise,
)
source = PhotoelectronSource(camera=camera)
acquisition = EventAcquisition(camera=camera)
for iev in range(n_events):
pe = source.get_nsb(nsb)
readout = acquisition.get_continuous_readout(pe)
waveform = acquisition.get_sampled_waveform(readout)
charge = waveform[0, window_start:window_end].sum()
d_list.append(dict(
nsb=nsb,
opct=opct,
wf_stddev=wf_stddev,
enf=spectrum.excess_noise_factor,
iev=iev,
charge=charge,
))
df = pd.DataFrame(d_list)
def prediction_total(nsb, enf, stddev):
return np.sqrt(prediction_nsb(nsb, enf)**2 + prediction_noise(stddev)**2)
fig, ax = plt.subplots(figsize=(20, 10))
for (enf, wf_stddev), group in df.groupby(['enf', 'wf_stddev']):
color = ax._get_lines.get_next_color()
gb_nsb = group.groupby('nsb').std()
nsb = gb_nsb.index.values
std = gb_nsb['charge'].values
yerr = std / np.sqrt(2 * n_events - 1)
ax.errorbar(nsb, std, yerr=yerr, fmt='.', color=color, label=f"{enf}, {wf_stddev}")
std_predict = prediction_total(nsb, enf, wf_stddev)
ax.plot(nsb, std_predict, '--', color=color)
ax.set_xlabel("NSB Rate (MHz)")
ax.set_ylabel("StdDev")
ax.set_xscale("log")
ax.legend(title="ENF, WF StdDev")
###Output
_____no_output_____ |
talks/Feb2019-Intro-Pandas-Dask/Example-Dask-multi-csvs.ipynb | ###Markdown
Year,Month,DayofMonth,DayOfWeek,DepTime,CRSDepTime,ArrTime,CRSArrTime,UniqueCarrier,FlightNum,TailNum,ActualElapsedTime,CRSElapsedTime,AirTime,ArrDelay,DepDelay,Origin,Dest,Distance,TaxiIn,TaxiOut,Cancelled,Diverted
1991,1,8,2,1215.0,1215,1340.0,1336,US,121,,85.0,81,,4.0,0.0,EWR,PIT,319.0,,,0,0
1991,1,9,3,1215.0,1215,1353.0,1336,US,121,,98.0,81,,17.0,0.0,EWR,PIT,319.0,,,0,0
1991,1,10,4,1216.0,1215,1332.0,1336,US,121,,76.0,81,,-4.0,1.0,EWR,PIT,319.0,,,0,0
###Code
import dask.dataframe as dd
ddf = dd.read_csv(fnames, dtype={'CRSElapsedTime': 'object','TailNum': 'object'})
ddf.columns
import pandas as pd
###Output
_____no_output_____
###Markdown
Let's compare the time that Pandas and Dask take to evaluate the unique elements of a column
###Code
%%time
ddf['UniqueCarrier'].unique().compute().tolist()
%%time
carriers = []
for fname in fnames:
df = pd.read_csv(fname, dtype={'CRSElapsedTime': 'object','TailNum': 'object'})
carriers.extend(df.UniqueCarrier.unique().tolist())
print(list(set(carriers)))
###Output
['HP', 'NW', 'ML (1)', 'EA', 'PA (1)', 'TW', 'UA', 'DL', 'US', 'CO', 'AA']
CPU times: user 5.56 s, sys: 90.8 ms, total: 5.65 s
Wall time: 5.67 s
###Markdown
Let's get and display the average yearly departure delay
###Code
ser = ddf.groupby('Year')['DepDelay'].mean()
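# Nothing is computed yet: Dask only builds a lazy task graph here. .visualize() below renders
# that graph, and .compute() is what actually triggers execution.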
ser.visualize(filename='multi-csvs.svg')
%matplotlib notebook
ser.compute().plot.bar()
###Output
_____no_output_____ |
Archive/01-AML-TF-Single-GPU/01-train-single_gpu.ipynb | ###Markdown
TensorFlow single-GPU training
###Code
import os
data_folder = 'datasets'
os.makedirs(data_folder, exist_ok=True)
import tensorflow as tf
tf.__version__
###Output
_____no_output_____
###Markdown
Display examples of images
Read a few images from the training TFRecords file.
###Code
def parse(example_proto):
    features = {'label': tf.FixedLenFeature((), tf.int64, default_value=0),
                'image': tf.FixedLenFeature((), tf.string, default_value="")}
    parsed_features = tf.parse_single_example(example_proto, features)
    label = parsed_features['label']
    image = tf.decode_raw(parsed_features['image'], tf.uint8)
    image = tf.reshape(image, (224, 224, 3,))
    return image, label
files = ['aerialsmall_train.tfrecords']
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(parse)
dataset = dataset.shuffle(1000)
iterator = dataset.make_one_shot_iterator()
images = []
labels = []
with tf.Session() as sess:
for i in range(3):
image, label = sess.run(iterator.get_next())
images.append(image)
labels.append(label)
###Output
_____no_output_____
###Markdown
Display the images.
###Code
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
figsize=(10, 8)
fig, axis = plt.subplots(len(images)//3, 3, figsize=figsize)
fig.tight_layout()
for ax, image, label in zip(axis.flat[0:], images, labels):
ax.set_title(str(label))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(image)
from tensorflow.keras.applications import resnet50
# Returns a dataset based on a list of TFRecords files passed as a parameter.
def create_dataset(files, batch_size=2, train=True, buffer_size=10000):
IMAGE_SHAPE = (224, 224, 3,)
NUM_CLASSES = 6
def _parse(example_proto):
features = {'label': tf.FixedLenFeature((), tf.int64, default_value=0),
'image': tf.FixedLenFeature((), tf.string, default_value="")}
parsed_features = tf.parse_single_example(example_proto, features)
label = parsed_features['label']
#label = tf.one_hot(label, NUM_CLASSES)
image = image = tf.decode_raw(parsed_features['image'], tf.uint8)
image = tf.cast(image, tf.float32)
#image = scale_image(image)
image = tf.reshape(image, IMAGE_SHAPE)
return image, label
def _resnet_preprocess(images, labels):
images = resnet50.preprocess_input(images)
return images, labels
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(_parse)
#if train:
#dataset = dataset.shuffle(buffer_size)
dataset = dataset.batch(batch_size)
dataset = dataset.map(_resnet_preprocess)
#dataset = dataset.repeat()
return dataset
files = ['aerialsmall_train.tfrecords']
dataset = create_dataset(files)
iterator = dataset.make_one_shot_iterator()
with tf.Session() as sess:
images, labels = sess.run(iterator.get_next())
print(images.shape)
print(labels.shape)
print(images)
###Output
[[[[-19.939003 12.221001 -44.68 ]
[-29.939003 -6.7789993 -56.68 ]
[-49.939003 -35.779 -79.68 ]
...
[-22.939003 1.2210007 -48.68 ]
[-29.939003 -12.778999 -56.68 ]
[-36.939003 -30.779 -67.68 ]]
[[ -5.939003 30.221 -29.68 ]
[-14.939003 9.221001 -42.68 ]
[-34.939003 -21.779 -65.68 ]
...
[-21.939003 6.2210007 -45.68 ]
[-23.939003 -2.7789993 -51.68 ]
[-28.939003 -15.778999 -58.68 ]]
[[ -7.939003 33.221 -30.68 ]
[-11.939003 17.221 -38.68 ]
[-25.939003 -8.778999 -55.68 ]
...
[-14.939003 13.221001 -40.68 ]
[-23.939003 0.22100067 -50.68 ]
[-32.939003 -12.778999 -60.68 ]]
...
[[-27.939003 -4.7789993 -52.68 ]
[-31.939003 -9.778999 -57.68 ]
[-25.939003 -5.7789993 -53.68 ]
...
[ 63.060997 69.221 58.32 ]
[ 55.060997 65.221 53.32 ]
[ 43.060997 55.221 42.32 ]]
[[-26.939003 -3.7789993 -50.68 ]
[-34.939003 -12.778999 -60.68 ]
[-35.939003 -15.778999 -63.68 ]
...
[ 79.061 81.221 71.32 ]
[ 74.061 79.221 68.32 ]
[ 54.060997 63.221 50.32 ]]
[[-31.939003 -8.778999 -56.68 ]
[-33.939003 -10.778999 -59.68 ]
[-39.939003 -18.779 -66.68 ]
...
[ 90.061 89.221 80.32 ]
[ 83.061 83.221 73.32 ]
[ 61.060997 66.221 53.32 ]]]
[[[-14.939003 -25.779 -46.68 ]
[ -5.939003 -17.779 -38.68 ]
[ -9.939003 -21.779 -42.68 ]
...
[ 3.060997 54.221 17.32 ]
[ 5.060997 57.221 19.32 ]
[ 7.060997 60.221 21.32 ]]
[[ -8.939003 -18.779 -40.68 ]
[ 3.060997 -7.7789993 -28.68 ]
[ -5.939003 -17.779 -38.68 ]
...
[ 8.060997 57.221 20.32 ]
[ 7.060997 57.221 19.32 ]
[ 6.060997 57.221 18.32 ]]
[[ -7.939003 -15.778999 -38.68 ]
[ -9.939003 -18.779 -41.68 ]
[-23.939003 -34.779 -55.68 ]
...
[ 12.060997 59.221 22.32 ]
[ 8.060997 57.221 18.32 ]
[ 5.060997 54.221 15.32 ]]
...
[[ 2.060997 55.221 7.3199997 ]
[ 6.060997 58.221 11.32 ]
[ 3.060997 52.221 7.3199997 ]
...
[ 16.060997 62.221 21.32 ]
[ 25.060997 66.221 26.32 ]
[ 24.060997 61.221 22.32 ]]
[[ 2.060997 55.221 8.32 ]
[ 8.060997 61.221 14.32 ]
[ 10.060997 60.221 16.32 ]
...
[ 15.060997 61.221 19.32 ]
[ 20.060997 63.221 22.32 ]
[ 18.060997 60.221 19.32 ]]
[[ 4.060997 56.221 10.32 ]
[ 9.060997 59.221 15.32 ]
[ 10.060997 58.221 16.32 ]
...
[ 14.060997 60.221 18.32 ]
[ 16.060997 61.221 19.32 ]
[ 14.060997 59.221 16.32 ]]]]
|
StyleGan2_ADA_Custom_Edited.ipynb | ###Markdown
###Code
# https://github.com/dvschultz/ml-art-colabs/blob/master/Stylegan2_ada_Custom_Training.ipynb
###Output
_____no_output_____
###Markdown
Custom Training StyleGAN2-ADA

StyleGAN2-ADA only works with TensorFlow 1. Run the next cell before anything else to make sure we're using TF1 and not TF2. You're going to need at least 12 GB of RAM for StyleGAN2 to run; with Google Colab, you'd need Pro or Pro+, unfortunately.
###Code
!pip install numpy==1.19.5 # For some reason, need to downgrade to run.
%tensorflow_version 1.x
!pip show tensorflow
!nvidia-smi
###Output
Sun Mar 20 03:02:52 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 34C P0 25W / 250W | 0MiB / 16280MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Install Repo to Google Drive

Colab is a little funky with training. I've found the best way to do this is to install the repo directly into your Google Drive folder. First, mount your Drive to the Colab notebook:
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
Mounted at /content/drive
###Markdown
Next, run this cell. If you've already installed the repo, it will skip the installation process and change into the repo's directory. If you haven't installed it, it will install all the files necessary.
###Code
import os
if os.path.isdir("/content/drive/My Drive/colab-sg2-ada"):
%cd "/content/drive/My Drive/colab-sg2-ada/stylegan2-ada"
else:
#install script
%cd "/content/drive/My Drive/"
!mkdir colab-sg2-ada
%cd colab-sg2-ada
!git clone https://github.com/dvschultz/stylegan2-ada
%cd stylegan2-ada
!mkdir downloads
!mkdir datasets
%cd "/content/drive/My Drive/colab-sg2-ada/stylegan2-ada"
!git config --global user.name "test"
!git config --global user.email "[email protected]"
!git fetch origin
!git checkout origin/main -- train.py
###Output
/content/drive/My Drive/colab-sg2-ada/stylegan2-ada
###Markdown
Convert dataset to .tfrecords

Note: You only need to do this once per dataset. If you have already run this and are returning to continue training, skip these cells. Next we need to convert our image dataset to a format that StyleGAN2-ADA can read from. There are two options here. You can upload your dataset directly to Colab (as a zipped file), or you can upload it to Drive directly and read it from there.
###Code
# # #if you manually uploaded your dataset to Colab, unzip it
# zip_path = "/content/CAT1024.zip"
# !unzip {zip_path} -d /content/
###Output
_____no_output_____
###Markdown
Now that your image dataset is uploaded, we need to convert it to the .tfrecords format. Depending on the resolution of your images and how many you have, this can take a while.
###Code
#update this to the path to your image folder
dataset_path = "/content/drive/MyDrive/artists" #/content/drive/MyDrive/abstract abstract_tf for entire ~7500 images.
#give your dataset a name
dataset_name = "artists_tf" #abstract_tf for entire ~7500 images.
#you don't need to edit anything here
!python dataset_tool.py create_from_images ./datasets/{dataset_name} {dataset_path}
"""NOTE: I move the ./datasets/dataset_name/ to drive/My Drive so I don't have to rerun this cell."""
###Output
Loading images from "/content/drive/MyDrive/artists"
Creating dataset "./datasets/artists_tf"
dataset_tool.py:97: DeprecationWarning: tostring() is deprecated. Use tobytes() instead.
'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
Added 21309 images.
###Markdown
Train a custom model

We're ready to start training! There are numerous arguments to training; what's listed below are the most popular options. To see all the options, run the following cell.
###Code
!python train.py --help
#this name must EXACTLY match the dataset name you used when creating the .tfrecords file
dataset_name = "artists_tf" # abstract_tf
#how often should the model generate samples and a .pkl file
snapshot_count = 5
#should the images be mirrored left to right?
mirrored = True
#should the images be mirrored top to bottom?
mirroredY = True
"""Since mirrored vertically and horizontally, we now have x4 as many records."""
#metrics?
metric_list = None
#augments
# augs = "bgcfnc"
#
# this is the most important cell to update
#
# running it for the first time? set it to ffhq(+resolution)
# resuming? get the path to your latest .pkl file and use that
resume_from = "/content/drive/MyDrive/results_artists_gan/00002-artists_tf-mirror-mirrory-11gb-gpu-bgcfnc-resumecustom/network-snapshot-000060.pkl" # Location of latest .pkl weights. --resume={resume_from}
!python train.py --outdir=/content/drive/MyDrive/results_artists_gan --snap={snapshot_count} --cfg=11gb-gpu --data=/content/drive/MyDrive/{dataset_name} --mirror={mirrored} --mirrory={mirroredY} --metrics={metric_list} --augpipe=bgcfnc --resume={resume_from}
###Output
tcmalloc: large alloc 4294967296 bytes == 0x55990819e000 @ 0x7fec2ca90001 0x7fec29c9354f 0x7fec29ce3b58 0x7fec29ce7b17 0x7fec29d86203 0x559900000424 0x559900000120 0x559900074b80 0x55990006f66e 0x55990000236c 0x5599000437b9 0x5599000406d4 0x559900000c29 0x559900074e61 0x55990006f02f 0x5598fff40e2b 0x559900071633 0x55990006f02f 0x5598fff40e2b 0x559900071633 0x55990006f66e 0x5598fff40e2b 0x559900071633 0x5599000019da 0x55990006feae 0x55990006f02f 0x55990006ed43 0x559900139302 0x55990013967d 0x559900139526 0x5599001111d3
tcmalloc: large alloc 4294967296 bytes == 0x559a0819e000 @ 0x7fec2ca8e1e7 0x7fec29c9346e 0x7fec29ce3c7b 0x7fec29ce435f 0x7fec29d86103 0x559900000424 0x559900000120 0x559900074b80 0x55990006f02f 0x559900001aba 0x559900070cd4 0x55990006f02f 0x559900001aba 0x559900070cd4 0x55990006f02f 0x559900001aba 0x559900070cd4 0x5599000019da 0x55990006feae 0x55990006f02f 0x559900001aba 0x5599000742c0 0x55990006f02f 0x559900001aba 0x559900070cd4 0x55990006f66e 0x55990000236c 0x5599000437b9 0x5599000406d4 0x559900000c29 0x559900074e61
tcmalloc: large alloc 4294967296 bytes == 0x559b09486000 @ 0x7fec2ca8e1e7 0x7fec29c9346e 0x7fec29ce3c7b 0x7fec29ce435f 0x7febb368f235 0x7febb3012792 0x7febb3012d42 0x7febb2fcbaee 0x559900000317 0x559900000120 0x559900074679 0x5599000019da 0x559900070108 0x55990006f1c0 0x5598fff40eb0 0x559900071633 0x55990006f02f 0x559900001aba 0x559900070108 0x55990006f66e 0x559900001aba 0x559900070108 0x5599000019da 0x559900070108 0x55990006f02f 0x559900002151 0x559900002571 0x559900071633 0x55990006f02f 0x559900001aba 0x55990006feae
Training options:
{
"G_args": {
"func_name": "training.networks.G_main",
"fmap_base": 16384,
"fmap_max": 512,
"mapping_layers": 8,
"num_fp16_res": 4,
"conv_clamp": 256
},
"D_args": {
"func_name": "training.networks.D_main",
"mbstd_group_size": 4,
"fmap_base": 16384,
"fmap_max": 512,
"num_fp16_res": 4,
"conv_clamp": 256
},
"G_opt_args": {
"beta1": 0.0,
"beta2": 0.99,
"learning_rate": 0.002
},
"D_opt_args": {
"beta1": 0.0,
"beta2": 0.99,
"learning_rate": 0.002
},
"loss_args": {
"func_name": "training.loss.stylegan2",
"r1_gamma": 10
},
"augment_args": {
"class_name": "training.augment.AdaptiveAugment",
"tune_heuristic": "rt",
"tune_target": 0.6,
"apply_func": "training.augment.augment_pipeline",
"apply_args": {
"xflip": 1,
"rotate90": 1,
"xint": 1,
"scale": 1,
"rotate": 1,
"aniso": 1,
"xfrac": 1,
"brightness": 1,
"contrast": 1,
"lumaflip": 1,
"hue": 1,
"saturation": 1,
"imgfilter": 1,
"noise": 1,
"cutout": 1
},
"tune_kimg": 100
},
"num_gpus": 1,
"image_snapshot_ticks": 5,
"network_snapshot_ticks": 5,
"train_dataset_args": {
"path": "/content/drive/MyDrive/artists_tf",
"max_label_size": 0,
"use_raw": false,
"resolution": 1024,
"mirror_augment": true,
"mirror_augment_v": true
},
"metric_arg_list": [],
"metric_dataset_args": {
"path": "/content/drive/MyDrive/artists_tf",
"max_label_size": 0,
"use_raw": false,
"resolution": 1024,
"mirror_augment": true,
"mirror_augment_v": true
},
"total_kimg": 25000,
"minibatch_size": 4,
"minibatch_gpu": 4,
"G_smoothing_kimg": 10,
"G_smoothing_rampup": null,
"resume_pkl": "/content/drive/MyDrive/results_artists_gan/00002-artists_tf-mirror-mirrory-11gb-gpu-bgcfnc-resumecustom/network-snapshot-000060.pkl",
"run_dir": "/content/drive/MyDrive/results_artists_gan/00003-artists_tf-mirror-mirrory-11gb-gpu-bgcfnc-resumecustom"
}
Output directory: /content/drive/MyDrive/results_artists_gan/00003-artists_tf-mirror-mirrory-11gb-gpu-bgcfnc-resumecustom
Training data: /content/drive/MyDrive/artists_tf
Training length: 25000 kimg
Resolution: 1024
Number of GPUs: 1
Creating output directory...
Loading training set...
tcmalloc: large alloc 4294967296 bytes == 0x559907e9c000 @ 0x7fec2ca90001 0x7fec29c9354f 0x7fec29ce3b58 0x7fec29ce7b17 0x7fec29d86203 0x559900000424 0x559900000120 0x559900074b80 0x55990006f66e 0x55990000236c 0x5599000437b9 0x5599000406d4 0x559900000c29 0x559900074e61 0x55990006f02f 0x5598fff40e2b 0x559900071633 0x55990006f02f 0x5598fff40e2b 0x559900071633 0x55990006f66e 0x5598fff40e2b 0x559900071633 0x5599000019da 0x55990006feae 0x55990006f02f 0x55990006ed43 0x559900139302 0x55990013967d 0x559900139526 0x5599001111d3
tcmalloc: large alloc 4294967296 bytes == 0x559c09486000 @ 0x7fec2ca8e1e7 0x7fec29c9346e 0x7fec29ce3c7b 0x7fec29ce435f 0x7fec29d86103 0x559900000424 0x559900000120 0x559900074b80 0x55990006f02f 0x559900001aba 0x559900070cd4 0x55990006f02f 0x559900001aba 0x559900070cd4 0x55990006f02f 0x559900001aba 0x559900070cd4 0x5599000019da 0x55990006feae 0x55990006f02f 0x559900001aba 0x5599000742c0 0x55990006f02f 0x559900001aba 0x559900070cd4 0x55990006f66e 0x55990000236c 0x5599000437b9 0x5599000406d4 0x559900000c29 0x559900074e61
tcmalloc: large alloc 4294967296 bytes == 0x559c09486000 @ 0x7fec2ca8e1e7 0x7fec29c9346e 0x7fec29ce3c7b 0x7fec29ce435f 0x7febb368f235 0x7febb3012792 0x7febb3012d42 0x7febb2fcbaee 0x559900000317 0x559900000120 0x559900074679 0x5599000019da 0x559900070108 0x55990006f1c0 0x5598fff40eb0 0x559900071633 0x55990006f02f 0x559900001aba 0x559900070108 0x55990006f66e 0x559900001aba 0x559900070108 0x5599000019da 0x559900070108 0x55990006f02f 0x559900002151 0x559900002571 0x559900071633 0x55990006f02f 0x559900001aba 0x55990006feae
Image shape: [3, 1024, 1024]
Label shape: [0]
Constructing networks...
Setting up TensorFlow plugin "fused_bias_act.cu": Compiling... Loading... Done.
Setting up TensorFlow plugin "upfirdn_2d.cu": Compiling... Loading... Done.
Resuming from "/content/drive/MyDrive/results_artists_gan/00002-artists_tf-mirror-mirrory-11gb-gpu-bgcfnc-resumecustom/network-snapshot-000060.pkl"
G Params OutputShape WeightShape
--- --- --- ---
latents_in - (?, 512) -
labels_in - (?, 0) -
epochs 1 () ()
epochs_1 1 () ()
G_mapping/Normalize - (?, 512) -
G_mapping/Dense0 262656 (?, 512) (512, 512)
G_mapping/Dense1 262656 (?, 512) (512, 512)
G_mapping/Dense2 262656 (?, 512) (512, 512)
G_mapping/Dense3 262656 (?, 512) (512, 512)
G_mapping/Dense4 262656 (?, 512) (512, 512)
G_mapping/Dense5 262656 (?, 512) (512, 512)
G_mapping/Dense6 262656 (?, 512) (512, 512)
G_mapping/Dense7 262656 (?, 512) (512, 512)
G_mapping/Broadcast - (?, 18, 512) -
dlatent_avg - (512,) -
Truncation/Lerp - (?, 18, 512) -
G_synthesis/4x4/Const 8192 (?, 512, 4, 4) (1, 512, 4, 4)
G_synthesis/4x4/Conv 2622465 (?, 512, 4, 4) (3, 3, 512, 512)
G_synthesis/4x4/ToRGB 264195 (?, 3, 4, 4) (1, 1, 512, 3)
G_synthesis/8x8/Conv0_up 2622465 (?, 512, 8, 8) (3, 3, 512, 512)
G_synthesis/8x8/Conv1 2622465 (?, 512, 8, 8) (3, 3, 512, 512)
G_synthesis/8x8/Upsample - (?, 3, 8, 8) -
G_synthesis/8x8/ToRGB 264195 (?, 3, 8, 8) (1, 1, 512, 3)
G_synthesis/16x16/Conv0_up 2622465 (?, 512, 16, 16) (3, 3, 512, 512)
G_synthesis/16x16/Conv1 2622465 (?, 512, 16, 16) (3, 3, 512, 512)
G_synthesis/16x16/Upsample - (?, 3, 16, 16) -
G_synthesis/16x16/ToRGB 264195 (?, 3, 16, 16) (1, 1, 512, 3)
G_synthesis/32x32/Conv0_up 2622465 (?, 512, 32, 32) (3, 3, 512, 512)
G_synthesis/32x32/Conv1 2622465 (?, 512, 32, 32) (3, 3, 512, 512)
G_synthesis/32x32/Upsample - (?, 3, 32, 32) -
G_synthesis/32x32/ToRGB 264195 (?, 3, 32, 32) (1, 1, 512, 3)
G_synthesis/64x64/Conv0_up 2622465 (?, 512, 64, 64) (3, 3, 512, 512)
G_synthesis/64x64/Conv1 2622465 (?, 512, 64, 64) (3, 3, 512, 512)
G_synthesis/64x64/Upsample - (?, 3, 64, 64) -
G_synthesis/64x64/ToRGB 264195 (?, 3, 64, 64) (1, 1, 512, 3)
G_synthesis/128x128/Conv0_up 1442561 (?, 256, 128, 128) (3, 3, 512, 256)
G_synthesis/128x128/Conv1 721409 (?, 256, 128, 128) (3, 3, 256, 256)
G_synthesis/128x128/Upsample - (?, 3, 128, 128) -
G_synthesis/128x128/ToRGB 132099 (?, 3, 128, 128) (1, 1, 256, 3)
G_synthesis/256x256/Conv0_up 426369 (?, 128, 256, 256) (3, 3, 256, 128)
G_synthesis/256x256/Conv1 213249 (?, 128, 256, 256) (3, 3, 128, 128)
G_synthesis/256x256/Upsample - (?, 3, 256, 256) -
G_synthesis/256x256/ToRGB 66051 (?, 3, 256, 256) (1, 1, 128, 3)
G_synthesis/512x512/Conv0_up 139457 (?, 64, 512, 512) (3, 3, 128, 64)
G_synthesis/512x512/Conv1 69761 (?, 64, 512, 512) (3, 3, 64, 64)
G_synthesis/512x512/Upsample - (?, 3, 512, 512) -
G_synthesis/512x512/ToRGB 33027 (?, 3, 512, 512) (1, 1, 64, 3)
G_synthesis/1024x1024/Conv0_up 51297 (?, 32, 1024, 1024) (3, 3, 64, 32)
G_synthesis/1024x1024/Conv1 25665 (?, 32, 1024, 1024) (3, 3, 32, 32)
G_synthesis/1024x1024/Upsample - (?, 3, 1024, 1024) -
G_synthesis/1024x1024/ToRGB 16515 (?, 3, 1024, 1024) (1, 1, 32, 3)
--- --- --- ---
Total 30370062
D Params OutputShape WeightShape
--- --- --- ---
images_in - (?, 3, 1024, 1024) -
labels_in - (?, 0) -
1024x1024/FromRGB 128 (?, 32, 1024, 1024) (1, 1, 3, 32)
1024x1024/Conv0 9248 (?, 32, 1024, 1024) (3, 3, 32, 32)
1024x1024/Conv1_down 18496 (?, 64, 512, 512) (3, 3, 32, 64)
1024x1024/Skip 2048 (?, 64, 512, 512) (1, 1, 32, 64)
512x512/Conv0 36928 (?, 64, 512, 512) (3, 3, 64, 64)
512x512/Conv1_down 73856 (?, 128, 256, 256) (3, 3, 64, 128)
512x512/Skip 8192 (?, 128, 256, 256) (1, 1, 64, 128)
256x256/Conv0 147584 (?, 128, 256, 256) (3, 3, 128, 128)
256x256/Conv1_down 295168 (?, 256, 128, 128) (3, 3, 128, 256)
256x256/Skip 32768 (?, 256, 128, 128) (1, 1, 128, 256)
128x128/Conv0 590080 (?, 256, 128, 128) (3, 3, 256, 256)
128x128/Conv1_down 1180160 (?, 512, 64, 64) (3, 3, 256, 512)
128x128/Skip 131072 (?, 512, 64, 64) (1, 1, 256, 512)
64x64/Conv0 2359808 (?, 512, 64, 64) (3, 3, 512, 512)
64x64/Conv1_down 2359808 (?, 512, 32, 32) (3, 3, 512, 512)
64x64/Skip 262144 (?, 512, 32, 32) (1, 1, 512, 512)
32x32/Conv0 2359808 (?, 512, 32, 32) (3, 3, 512, 512)
32x32/Conv1_down 2359808 (?, 512, 16, 16) (3, 3, 512, 512)
32x32/Skip 262144 (?, 512, 16, 16) (1, 1, 512, 512)
16x16/Conv0 2359808 (?, 512, 16, 16) (3, 3, 512, 512)
16x16/Conv1_down 2359808 (?, 512, 8, 8) (3, 3, 512, 512)
16x16/Skip 262144 (?, 512, 8, 8) (1, 1, 512, 512)
8x8/Conv0 2359808 (?, 512, 8, 8) (3, 3, 512, 512)
8x8/Conv1_down 2359808 (?, 512, 4, 4) (3, 3, 512, 512)
8x8/Skip 262144 (?, 512, 4, 4) (1, 1, 512, 512)
4x4/MinibatchStddev - (?, 513, 4, 4) -
4x4/Conv 2364416 (?, 512, 4, 4) (3, 3, 513, 512)
4x4/Dense0 4194816 (?, 512) (8192, 512)
Output 513 (?, 1) (512, 1)
--- --- --- ---
Total 29012513
Exporting sample images...
Replicating networks across 1 GPUs...
Initializing augmentations...
Setting up optimizers...
Constructing training graph...
Finalizing training ops...
Initializing metrics...
Training for 25000 kimg...
tick 0 kimg 0.0 time 2m 33s sec/tick 28.0 sec/kimg 1751.20 maintenance 125.4 gpumem 10.2 augment 0.000
tick 1 kimg 4.0 time 35m 48s sec/tick 1979.4 sec/kimg 494.85 maintenance 15.6 gpumem 10.2 augment 0.006
tick 2 kimg 8.0 time 1h 08m 50s sec/tick 1981.2 sec/kimg 495.31 maintenance 0.0 gpumem 10.2 augment 0.009
tick 3 kimg 12.0 time 1h 41m 51s sec/tick 1981.7 sec/kimg 495.41 maintenance 0.0 gpumem 10.2 augment 0.011
tick 4 kimg 16.0 time 2h 14m 55s sec/tick 1983.3 sec/kimg 495.81 maintenance 0.0 gpumem 10.2 augment 0.016
tick 5 kimg 20.0 time 2h 47m 59s sec/tick 1984.5 sec/kimg 496.12 maintenance 0.0 gpumem 10.2 augment 0.022
tick 6 kimg 24.0 time 3h 21m 10s sec/tick 1987.3 sec/kimg 496.82 maintenance 3.8 gpumem 10.2 augment 0.028
tick 7 kimg 28.0 time 3h 54m 18s sec/tick 1987.7 sec/kimg 496.93 maintenance 0.0 gpumem 10.2 augment 0.033
tick 8 kimg 32.0 time 4h 27m 28s sec/tick 1990.4 sec/kimg 497.60 maintenance 0.0 gpumem 10.2 augment 0.038
tick 9 kimg 36.0 time 5h 00m 38s sec/tick 1990.1 sec/kimg 497.52 maintenance 0.0 gpumem 10.2 augment 0.039
tick 10 kimg 40.0 time 5h 33m 49s sec/tick 1990.7 sec/kimg 497.67 maintenance 0.0 gpumem 10.2 augment 0.045
tick 11 kimg 44.0 time 6h 07m 05s sec/tick 1991.4 sec/kimg 497.86 maintenance 4.4 gpumem 10.2 augment 0.048
tick 12 kimg 48.0 time 6h 40m 19s sec/tick 1993.7 sec/kimg 498.42 maintenance 0.0 gpumem 10.2 augment 0.052
tick 13 kimg 52.0 time 7h 13m 34s sec/tick 1995.2 sec/kimg 498.79 maintenance 0.0 gpumem 10.2 augment 0.063
tick 14 kimg 56.0 time 7h 46m 52s sec/tick 1998.5 sec/kimg 499.61 maintenance 0.0 gpumem 10.2 augment 0.068
tick 15 kimg 60.0 time 8h 20m 11s sec/tick 1998.3 sec/kimg 499.57 maintenance 0.0 gpumem 10.2 augment 0.075
tick 16 kimg 64.0 time 8h 53m 36s sec/tick 2001.8 sec/kimg 500.45 maintenance 3.9 gpumem 10.2 augment 0.082
tick 17 kimg 68.0 time 9h 26m 58s sec/tick 2002.2 sec/kimg 500.55 maintenance 0.0 gpumem 10.2 augment 0.090
tick 18 kimg 72.0 time 10h 00m 24s sec/tick 2005.4 sec/kimg 501.35 maintenance 0.0 gpumem 10.2 augment 0.098
tick 19 kimg 76.0 time 10h 33m 49s sec/tick 2005.3 sec/kimg 501.32 maintenance 0.0 gpumem 10.2 augment 0.104
tick 20 kimg 80.0 time 11h 07m 16s sec/tick 2006.9 sec/kimg 501.73 maintenance 0.0 gpumem 10.2 augment 0.104
tick 21 kimg 84.0 time 11h 40m 47s sec/tick 2006.9 sec/kimg 501.73 maintenance 4.1 gpumem 10.2 augment 0.108
tick 22 kimg 88.0 time 12h 14m 16s sec/tick 2009.1 sec/kimg 502.26 maintenance 0.0 gpumem 10.2 augment 0.108
tick 23 kimg 92.0 time 12h 47m 45s sec/tick 2008.7 sec/kimg 502.18 maintenance 0.0 gpumem 10.2 augment 0.112
tick 24 kimg 96.0 time 13h 21m 16s sec/tick 2011.5 sec/kimg 502.87 maintenance 0.0 gpumem 10.2 augment 0.120
tick 25 kimg 100.0 time 13h 54m 46s sec/tick 2010.1 sec/kimg 502.52 maintenance 0.0 gpumem 10.2 augment 0.122
tick 26 kimg 104.0 time 14h 28m 22s sec/tick 2011.5 sec/kimg 502.86 maintenance 3.9 gpumem 10.2 augment 0.122
tick 27 kimg 108.0 time 15h 01m 53s sec/tick 2010.8 sec/kimg 502.71 maintenance 0.0 gpumem 10.2 augment 0.125
tick 28 kimg 112.0 time 15h 35m 26s sec/tick 2013.7 sec/kimg 503.42 maintenance 0.0 gpumem 10.2 augment 0.128
tick 29 kimg 116.0 time 16h 08m 59s sec/tick 2012.9 sec/kimg 503.23 maintenance 0.0 gpumem 10.2 augment 0.129
tick 30 kimg 120.0 time 16h 42m 32s sec/tick 2013.0 sec/kimg 503.26 maintenance 0.0 gpumem 10.2 augment 0.130
tick 31 kimg 124.0 time 17h 16m 09s sec/tick 2012.9 sec/kimg 503.22 maintenance 4.0 gpumem 10.2 augment 0.134
tick 32 kimg 128.0 time 17h 49m 43s sec/tick 2013.4 sec/kimg 503.34 maintenance 0.0 gpumem 10.2 augment 0.136
tick 33 kimg 132.0 time 18h 23m 16s sec/tick 2013.4 sec/kimg 503.36 maintenance 0.0 gpumem 10.2 augment 0.139
tick 34 kimg 136.0 time 18h 56m 49s sec/tick 2013.2 sec/kimg 503.29 maintenance 0.0 gpumem 10.2 augment 0.134
|
examples/2D/subsampling.ipynb | ###Markdown
Create a subsample folder and move files
###Code
%cd /home/qzt
%pwd
X = sorted(glob('data/dsb2018/train/images/*.tif'))
Y = sorted(glob('data/dsb2018/train/masks/*.tif'))
assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))
ind = np.arange(len(X))
random.seed(4)
random.shuffle(ind)
ind_sub = ind[:5]
ind_sub
X_sub = [X[i] for i in ind_sub]
X_sub
name = [Path(x).name for x in X_sub]
name
Y_sub = ['data/dsb2018/train/masks/' + n for n in name]
Y_sub
import os
import shutil
m_path = 'data/dsb2018/train_sub/masks/'
i_path = 'data/dsb2018/train_sub/images/'
os.makedirs(os.path.dirname(i_path), exist_ok=True)
for f in X_sub:
shutil.copy(f, i_path)
os.makedirs(os.path.dirname(m_path), exist_ok=True)
for f in Y_sub:
shutil.copy(f, m_path)
###Output
_____no_output_____
###Markdown
Read image files from the subsampled folder
###Code
X = sorted(glob('data/dsb2018/train_sub/images/*.tif'))
Y = sorted(glob('data/dsb2018/train_sub/masks/*.tif'))
assert all(Path(x).name==Path(y).name for x,y in zip(X,Y))
X
im_X = list(map(imread,X))
im_Y = list(map(imread,Y))
i=0
img, lbl = im_X[i], fill_label_holes(im_Y[i])
assert img.ndim in (2,3)
name = Path(X[i]).name
print(img.shape)
# img = img if img.ndim==2 else img[...,:3]
# assumed axes ordering of img and lbl is: YX(C)
plt.figure(figsize=(16,10))
plt.subplot(121); plt.imshow(img,cmap='gray'); plt.axis('off'); plt.title(name)
plt.subplot(122); plt.imshow(lbl,cmap=lbl_cmap); plt.axis('off'); plt.title(name+'_labels')
None;
###Output
_____no_output_____ |
src/analysis/Characterizing Attackers by Account Activity.ipynb | ###Markdown
Load and Group Data
###Code
d = load_diffs(keep_diff=True)
df_events, df_blocked_user_text = load_block_events_and_users()
# The equal error threshold
threshold = 0.408
d['2015']['is_attack'] = (d['2015']['pred_attack_score_uncalibrated'] > threshold)
agg_dict = OrderedDict([('is_attack', ['count','sum']), ('user_id', 'first'), ('author_anon', 'first')])
df_a = d['2015'].groupby('user_text', as_index = False).agg(agg_dict)
df_a.columns = ['user_text', 'total', 'attacks', 'user_id', 'author_anon']
###Output
_____no_output_____
###Markdown
Helper Functions
###Code
def make_histogram(data, weights, bins, bin_labels, ylabel, xlabel, percent = True):
values, base = np.histogram(data, weights = weights, bins = bins)
center = (base[:-1] + base[1:])/2
if percent:
frac_values = 100*values/np.sum(values)
else:
frac_values = values
y_range = range(len(values))
plt.bar(y_range, frac_values, align = 'center')
plt.xticks(y_range, bin_labels)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
def make_split_histogram(data_1, data_2, weights_1, weights_2, bins, bin_labels, legend_labels, ylabel, xlabel):
reg_values, _ = np.histogram(data_1, weights = weights_1, bins = bins)
anon_values, _ = np.histogram(data_2, weights = weights_2, bins = bins)
total_values = np.sum(reg_values) + np.sum(anon_values)
frac_reg = 100.0*reg_values/total_values
frac_anon = 100.0*anon_values/total_values
y_range = range(len(reg_values))
p_reg = plt.bar(y_range, frac_reg, align = 'center', color = 'orange')
p_anon = plt.bar(y_range, frac_anon, align = 'center', bottom=frac_reg)
plt.xticks(y_range, bin_labels)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.legend((p_reg[0],p_anon[0]),legend_labels, loc = 0)
###Output
_____no_output_____
###Markdown
Distribution of Attacking Comments
###Code
bins = [1, 2, 6, 101, 10000]
bin_labels = ['1', '2 - 5', '6 - 100', '100+']
ylabel = 'Percentage of Attacking Comments'
xlabel = 'Editor Activity Level'
make_histogram(df_a[['total']], df_a[['attacks']], bins, bin_labels, ylabel, xlabel)
###Output
_____no_output_____
###Markdown
Split by Anonymity
###Code
df_anon = df_a.query('author_anon')
df_registered = df_a.query('not author_anon')
bins = [1, 2, 6, 101, 10000]
bin_labels = ['1', '2 - 5', '6 - 100', '100+']
legend_labels = ('Registered','Anonymous')
ylabel = 'Percentage of Attacking Comments'
xlabel = 'Editor Activity Level'
make_split_histogram(df_registered[['total']], df_anon[['total']], df_registered[['attacks']], df_anon[['attacks']], bins, bin_labels, legend_labels, ylabel, xlabel)
###Output
_____no_output_____
###Markdown
Distribution of All Comments
###Code
bins = [1, 2, 6, 101, 10000]
bin_labels = ['1', '2 - 5', '6 - 100', '100+']
ylabel = 'Percentage of All Comments'
xlabel = 'Editor Activity Level'
make_histogram(df_a[['total']], df_a[['total']], bins, bin_labels, ylabel, xlabel)
###Output
_____no_output_____
###Markdown
Percent of Attacks vs. Toxicity Level
###Code
bins = [1, 2, 6, 21, 10000]
bin_labels = ['1 \n (7048)', '2 - 5 \n (1970)', '6 - 20 \n (261)','20+ \n (34)']
ylabel = 'Percentage of Attacking Comments'
xlabel = 'Editor Toxicity Level \n (Number of Editors)'
make_histogram(df_a[['attacks']], df_a[['attacks']], bins, bin_labels, ylabel, xlabel)
bins = [1, 2, 6, 21, 10000]
bin_labels = ['1', '2 - 5', '6 - 20','20+']
ylabel = 'Percentage of Total Comments'
xlabel = 'Editor Toxicity Level'
make_histogram(df_a[['attacks']], df_a[['total']], bins, bin_labels, ylabel, xlabel)
bins = [1, 2, 6, 21, 10000]
bin_labels = ['1', '2 - 5', '6 - 20','20+']
ylabel = 'Number of Editors'
xlabel = 'Editor Toxicity Level'
make_histogram(df_a[['attacks']], None, bins, bin_labels, ylabel, xlabel, percent=False)
###Output
_____no_output_____ |
shell-ai-hackathon-weather-data/Level2/L2_ShellAI_Hackathon_2021_V1.ipynb | ###Markdown
Python modules used for the hackathon.
###Code
# !pip install pandas
# !pip install numpy
# !pip install sklearn
# !pip install matplotlib
# !pip install --upgrade pip
# !pip install graphviz
from google.colab import drive
drive.mount('/content/drive')
import os
import datetime
import re
import IPython
import IPython.display
from IPython.display import clear_output
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import mutual_info_regression
from sklearn.metrics import mean_absolute_error, silhouette_score
from scipy.stats import zscore
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.cluster import KMeans
from scipy.special import softmax
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Define commonly used variables
###Code
data_path = '/content/drive/MyDrive/ML_Projects/shell-ai-data-l2'
training_data_key = 'Data_Train'
testing_data_key = 'Data_Test'
# Training csv file: https://drive.google.com/file/d/1C7NuAI1i48y6GYoP4C0_7F4exC0TNdSP/view
# Testing csv file: https://drive.google.com/file/d/1MKRijjGs35uVTYAl4YFLcVV9ODb_5X97/view
training_path = os.path.join(data_path, training_data_key)
testing_path = os.path.join(data_path, testing_data_key)
# Image file links:
# 40x40_Projection: https://drive.google.com/file/d/1QWZwB0SDkiAH8ucNh9oaMibqqxZj7Nm0/view
# 40x40_Raw: https://drive.google.com/file/d/1C6s7satgiNK4bE6XWhSGOm2tusM_DKt5/view
# Please download and keep in your drive
img_kind = "Projection"
img_x = img_y = 40
imgs_npy_path = '{}/Images/{}x{}_{}.npy'.format(data_path, img_x, img_y, img_kind)
training_csv_path = os.path.join(training_path, 'weather_data.csv')
testing_csv_path = os.path.join(testing_path, 'weather_data.csv')
target_col = 'Global CMP22 (vent/cor) [W/m^2]'
date_col = 'DATE (YYYY/MM/DD)_MST'
scenario_set_col = 'scenario_set'
img_path_col = 'img_path'
img_index_col = 'img_index'
img_unavailable = -100
img_time_sine_col = 'Img Day sin'
img_time_cos_col = 'Img Day cos'
year_sine_col = 'Year sin'
year_cos_col = 'Year cos'
day_sine_col = 'Day sin'
day_cos_col = 'Day cos'
target_day_sine_col = 'Target Day sin'
target_day_cos_col = 'Target Day cos'
is_train_col = 'is_train'
target_date_cols = [target_day_sine_col, target_day_cos_col]
INPUT_STEPS = 12
SAMPLING_RATE = 10
OUTPUT_STEPS = 12
IMG_DELAY_TOLERANCE = 3
cloud_coverage_col = 'Total Cloud Cover [%]'
precipitation_col = 'Precipitation (Accumulated) [mm]'
moisture_col = 'Moisture'
feature_cols_for_outliers = [target_col, 'Direct sNIP [W/m^2]', 'Azimuth Angle [degrees]',
'Tower Dry Bulb Temp [deg C]', 'Tower Wet Bulb Temp [deg C]',
'Tower Dew Point Temp [deg C]', 'Tower RH [%]', cloud_coverage_col,
'Peak Wind Speed @ 6ft [m/s]', 'Avg Wind Direction @ 6ft [deg from N]',
'Station Pressure [mBar]', precipitation_col,
'Snow Depth [cm]', moisture_col, 'Albedo (CMP11)']
non_neg_feature_cols = [target_col, 'Direct sNIP [W/m^2]', cloud_coverage_col ]
time_series_cols = [target_col, 'Direct sNIP [W/m^2]', 'Tower Dry Bulb Temp [deg C]',
'Tower Wet Bulb Temp [deg C]', 'Tower Dew Point Temp [deg C]',
'Tower RH [%]', cloud_coverage_col, 'Station Pressure [mBar]',
'Snow Depth [cm]', 'Albedo (CMP11)', precipitation_col,
'Wind x','Wind y', 'Azimuth sin', moisture_col,
'Azimuth cos', day_sine_col, day_cos_col, year_sine_col, year_cos_col]
img_cols = [img_index_col, img_time_sine_col, img_time_cos_col]
feature_cols_for_prepare = time_series_cols + [img_index_col]
day_start_idx_col = 'day_start_idx'
feature_start_idx_col = 'feature_start_idx'
feature_steps_col = 'feature_steps'
feature_end_idx_col = 'feature_end_idx'
total_end_idx_col = 'total_end_idx'
cluster_col = 'cluster'
index_col = 'index'
idx_df_cols = [cluster_col, scenario_set_col, index_col, day_start_idx_col, feature_start_idx_col,
feature_end_idx_col, total_end_idx_col, feature_steps_col]
cluster_idx = idx_df_cols.index(cluster_col)
day_start_idx = idx_df_cols.index(day_start_idx_col)
feature_start_idx = idx_df_cols.index(feature_start_idx_col)
feature_end_idx = idx_df_cols.index(feature_end_idx_col)
total_end_idx = idx_df_cols.index(total_end_idx_col)
pred_df_cols = ['{}_min_horizon'.format(p) for p in range(10, 130, 10)]
# Convert a timestamp series to sine and cosine series, to capture the cyclic nature of time
def convert_time_stamp_to_vector(timestamps, freq='day'):
if freq == 'day':
divisor = 24*60*60
elif freq == 'year':
divisor = (365.2425)*(24*60*60)
else:
raise Exception('Frequency not supported')
timestamp_s = (timestamps - pd.Timestamp("2010-01-01")).dt.total_seconds()
sine_series = np.sin(timestamp_s * (2 * np.pi / divisor))
cos_series = np.cos(timestamp_s * (2 * np.pi / divisor))
return sine_series, cos_series
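# Small sanity check (illustrative): on the daily cycle, 06:00 maps to (sin, cos) ~ (1, 0) and
# 12:00 maps to ~ (0, -1); times just before and after midnight end up close together in this
# representation, unlike with raw seconds-since-midnight.
_demo_ts = pd.Series(pd.to_datetime(["2010-01-01 06:00", "2010-01-01 12:00"]))
_demo_day_sin, _demo_day_cos = convert_time_stamp_to_vector(_demo_ts, freq="day")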
###Output
_____no_output_____
###Markdown
Prepare images
###Code
with open(imgs_npy_path, 'rb') as f:
all_images = np.divide(np.load(f).reshape(-1, img_x, img_y, 3), 255)
all_image_paths = np.load(f)
all_images_tensor = tf.constant(all_images, dtype=tf.float32)
def prepare_img_df(img_paths_arr):
img_df = pd.DataFrame()
img_df[img_path_col] = img_paths_arr
img_df[img_index_col] = list(range(img_paths_arr.shape[0]))
img_date_extractor_fn = (lambda img_path: re.search(r"\d+", img_path.split('-')[-1]).group())
img_src_extractor_fn = (lambda img_path: str(img_path).split('-')[0])
img_df[date_col] = pd.to_datetime(img_df[img_path_col].apply(img_date_extractor_fn), format="%Y%m%d%H%M%S")
img_df['src'] = img_df[img_path_col].apply(img_src_extractor_fn)
img_df = img_df[~img_df[date_col].duplicated(keep='first')]
img_df[img_time_sine_col], img_df[img_time_cos_col] = convert_time_stamp_to_vector(img_df[date_col],
freq='day')
return img_df
all_image_info_df = prepare_img_df(all_image_paths)
train_image_paths_df = all_image_info_df[all_image_info_df['src'] == training_data_key].reset_index(drop=True)
test_image_paths_df = all_image_info_df[all_image_info_df['src'] == testing_data_key].reset_index(drop=True)
clear_output()
###Output
_____no_output_____
###Markdown
Read train and test datasets and convert to pandas Dataframes.
###Code
train_df = pd.read_csv(training_csv_path, parse_dates=[[0,1]])
test_df = pd.read_csv(testing_csv_path, parse_dates=[[0,1]])
train_df[scenario_set_col] = train_df[date_col].dt.strftime('%Y%m%d').astype(int)
train_df[is_train_col] = True
test_df[is_train_col] = False
###Output
_____no_output_____
###Markdown
Calculate z-scores and remove outliers. Resample and remove night time values as we are predicting for day time only.
###Code
def fill_outliers(df, features):
    for col in features:
        z_scores = np.abs(zscore(df[col])) > 3
        df.loc[z_scores, col] = np.nan  # .loc handles the boolean mask reliably
    return df
def fill_na(df, axis=None):
df = df.fillna(method='ffill', axis=axis)
df = df.fillna(method='bfill', axis=axis)
df = df.fillna(0)
return df
def fill_non_neg_feature_cols(df, feature_cols):
    for non_neg_feature_col in feature_cols:
        neg_values = df[non_neg_feature_col] < 0
        df.loc[neg_values, non_neg_feature_col] = np.nan  # .loc handles the boolean mask reliably
        df[non_neg_feature_col] = fill_na(df[non_neg_feature_col])
    return df
def resample_groupwise(df, groupby=scenario_set_col, rule='{}T'.format(SAMPLING_RATE), agg='mean'):
new_dfs = []
for group_key, sub_df in df.groupby(groupby):
sub_df = sub_df.set_index(date_col).resample(rule).agg(agg).reset_index()
sub_df[groupby] = group_key
new_dfs.append(sub_df)
return pd.concat(new_dfs, axis=0).reset_index(drop=True)
def perform_cleanup(df):
valid_indices = df.index[df[target_col] >= 0]
min_valid = min(valid_indices)
max_valid = max(valid_indices)
df = df.loc[min_valid:max_valid]
df[feature_cols_for_outliers] = fill_na(df[feature_cols_for_outliers])
df = fill_non_neg_feature_cols(df, non_neg_feature_cols)
return df.reset_index(drop=True)
def fill_na_groupwise(df, groupby=scenario_set_col):
new_dfs = []
for group_key, sub_df in df.groupby(groupby):
sub_df = sub_df.sort_values(date_col).reset_index(drop=True)
sub_df = perform_cleanup(sub_df)
sub_df = sub_df.set_index(date_col)
sub_df_target = sub_df[[target_col]].copy()
sub_df = sub_df.resample(pd.Timedelta(minutes=SAMPLING_RATE)).mean().reset_index()
sub_df_target = sub_df_target.resample('{}T'.format(SAMPLING_RATE)).last().reset_index()
sub_df[target_col] = sub_df_target[[target_col]]
new_dfs.append(sub_df)
new_df = pd.concat(new_dfs, axis=0).reset_index(drop=True)
return new_df
###Output
_____no_output_____
###Markdown
**perform_preprocessing** does the following feature engineering steps:

* Converts wind speed and direction into a wind velocity vector (velocity along the x and y axes).
* Converts the azimuth angle in degrees to sin and cos values, as they better capture its cyclic nature.
* Uses forward-fill (the ffill method in pandas) to fill the missing values in the target column.
* Converts dates to cyclic time-of-day and day-of-year features to capture daily and seasonal patterns.
###Code
def perform_preprocessing(df,img_df, img_delay_tolerance_duration):
peak_wind_speed_col = 'Peak Wind Speed @ 6ft [m/s]'
avg_wind_dir_col = 'Avg Wind Direction @ 6ft [deg from N]'
azimuth_angle_col = 'Azimuth Angle [degrees]'
wind_speed = pd.Series(df.pop(peak_wind_speed_col))
bad_wv = wind_speed < 0
wind_speed[bad_wv] = 0.0
wind_angle = df.pop(avg_wind_dir_col)*np.pi / 180
df['Wind x'] = wind_speed*np.cos(wind_angle)
df['Wind y'] = wind_speed*np.sin(wind_angle)
azimuth_angle = df.pop(azimuth_angle_col) * np.pi/180
df['Azimuth sin'] = np.sin(azimuth_angle)
df['Azimuth cos'] = np.cos(azimuth_angle)
day = 24*60*60
year = (365.2425)*day
df = df.join(img_df.set_index(date_col)[img_cols], how='left', on=date_col)
df[img_cols] = df[img_cols].fillna(method='ffill', limit=img_delay_tolerance_duration)
df[img_cols] = df[img_cols].fillna(img_unavailable)
df[[scenario_set_col, img_index_col]] = df[[scenario_set_col, img_index_col]].astype(np.int32)
df[day_sine_col], df[day_cos_col] = convert_time_stamp_to_vector(df[date_col], freq='day')
target_dates = df[date_col] + pd.Timedelta(minutes=INPUT_STEPS*SAMPLING_RATE)
df[target_day_sine_col], df[target_day_cos_col] = convert_time_stamp_to_vector(target_dates, freq='day')
df[year_sine_col], df[year_cos_col] = convert_time_stamp_to_vector(df[date_col], freq='year')
df = df.drop(columns=[is_train_col])
return df
###Output
_____no_output_____
###Markdown
Remove outliers and negative values and preprocess data
###Code
clean_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
clean_df = fill_outliers(clean_df, feature_cols_for_outliers)
train_df_cleaned = clean_df[clean_df[is_train_col]].reset_index(drop=True)
train_df_cleaned = fill_na_groupwise(train_df_cleaned, scenario_set_col)
test_df_cleaned = clean_df[~clean_df[is_train_col]].reset_index(drop=True)
test_df_cleaned = fill_na_groupwise(test_df_cleaned, scenario_set_col)
print("After clearing outliers", train_df.shape, train_df_cleaned.shape, test_df.shape, test_df_cleaned.shape)
del(clean_df)
train_df_cleaned = perform_preprocessing(train_df_cleaned, train_image_paths_df, IMG_DELAY_TOLERANCE*SAMPLING_RATE)
test_df_cleaned = perform_preprocessing(test_df_cleaned, test_image_paths_df, IMG_DELAY_TOLERANCE*SAMPLING_RATE)
print("After perform_preprocessing", train_df_cleaned.shape, test_df_cleaned.shape)
###Output
After clearing outliers (527040, 18) (28091, 18) (110000, 18) (11269, 18)
After perform_preprocessing (28091, 27) (11269, 27)
###Markdown
Clustering
###Code
num_clusters=4
cluster_cols = [target_col, year_sine_col, year_cos_col]
train_df_agg = train_df_cleaned.groupby(scenario_set_col).mean()
test_df_agg = test_df_cleaned.groupby(scenario_set_col).mean()
def apply_kmeans_clustering():
cluster_scaler = StandardScaler()
train_df_agg_scaled = cluster_scaler.fit_transform(train_df_agg[cluster_cols])
test_df_agg_scaled = cluster_scaler.transform(test_df_agg[cluster_cols])
kmeans = KMeans(n_clusters=num_clusters)
train_df_agg[cluster_col] = kmeans.fit_predict(train_df_agg_scaled)
test_df_agg[cluster_col] = kmeans.predict(test_df_agg_scaled)
def apply_date_based_clustering():
def map_to_cluster(month):
if month < 4 or month > 9:
return 0
else:
return 1
train_df_agg[cluster_col] = train_df_agg[date_col].dt.month.apply(map_to_cluster)
test_df_agg[cluster_col] = test_df_agg[date_col].dt.month.apply(map_to_cluster)
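# Optional sketch: one way to pick num_clusters using the silhouette score (silhouette_score is
# already imported above); not used below, where num_clusters is fixed to 4.
def pick_num_clusters(scaled_features, k_range=range(2, 8)):
    scores = {k: silhouette_score(scaled_features, KMeans(n_clusters=k).fit_predict(scaled_features))
              for k in k_range}
    return max(scores, key=scores.get)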
apply_kmeans_clustering()
train_df_agg = train_df_agg.reset_index()
test_df_agg = test_df_agg.reset_index()
train_df_cleaned[cluster_col] = pd.merge(train_df_cleaned[[scenario_set_col]], train_df_agg[[scenario_set_col, cluster_col]],
how='left', on=scenario_set_col)[cluster_col]
test_df_cleaned[cluster_col] = pd.merge(test_df_cleaned[[scenario_set_col]], test_df_agg[[scenario_set_col, cluster_col]],
how='left', on=scenario_set_col)[cluster_col]
print("After clustering", train_df_cleaned.shape, test_df_cleaned.shape)
print("Train\n", train_df_cleaned[cluster_col].value_counts(),"\nTest\n", test_df_cleaned[cluster_col].value_counts())
###Output
After clustering (28091, 28) (11269, 28)
Train
3 8478
1 7667
2 6276
0 5670
Name: cluster, dtype: int64
Test
1 4414
3 3005
2 2232
0 1618
Name: cluster, dtype: int64
###Markdown
Relation between features and target
###Code
# Mutual information scores
# def make_mi_scores(X, y):
# mi_scores = mutual_info_regression(X, y)
# mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
# mi_scores = mi_scores.sort_values(ascending=False)
# return mi_scores
# print("Train df:\n", make_mi_scores(train_df_cleaned.loc[:, feature_cols], train_df_cleaned[target_col]))
# print("Test df:\n", make_mi_scores(test_df_cleaned.loc[:, feature_cols], test_df_cleaned[target_col]))
###Output
_____no_output_____
###Markdown
**prepare_dfs_for_datasets** groups the dataset by date/scenario and creates the train and test datasets using helper functions.

* We group the dataset by date so that data is not combined across different dates, since we need to predict using data from within a single day in the test set.
* We prepare a dataframe containing indexes such as feature start, feature end, target end (total end), etc. This is later used to create TensorFlow datasets.
###Code
def prepare_index_rages(day_index_start, day_index_end, min_feature_steps,
max_feature_steps, target_steps):
total_end_range = np.arange(day_index_start+min_feature_steps+target_steps, day_index_end+1).reshape(-1,1)
feature_end_range = total_end_range - target_steps
steps_to_generate = np.arange(min_feature_steps, max_feature_steps+1, 3).reshape(1, -1)
feature_start_range = np.subtract(feature_end_range, steps_to_generate)
feature_start_range[feature_start_range<day_index_start]=day_index_start
feature_start_range = feature_start_range.min(axis=1, keepdims=True)
feature_steps_range = feature_end_range - feature_start_range
return np.hstack((feature_start_range, feature_end_range, total_end_range, feature_steps_range))
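# Illustration (hypothetical numbers): for a day spanning rows 0..35 with
# min_feature_steps = max_feature_steps = 12 and target_steps = 12, the first generated row is
# feature_start = 0, feature_end = 12, total_end = 24, i.e. rows 0-11 are the inputs and
# rows 12-23 are the targets for that training example.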
def filter_data_with_images(df_img, df_idx):
def images_available(row):
feature_start = int(row[feature_start_idx_col])
feature_end = int(row[feature_end_idx_col])
return df_img.loc[feature_end, img_index_col]!=img_unavailable
images_indexes_mask = df_idx.apply(images_available, axis=1)
return df_idx[images_indexes_mask].reset_index()
def prepare_dfs_for_datasets(combined_df, selected_scenarios, min_feature_steps=INPUT_STEPS,
max_feature_steps=INPUT_STEPS, target_steps=OUTPUT_STEPS):
data_df = combined_df
if selected_scenarios is not None:
data_df = combined_df[combined_df[scenario_set_col].isin(selected_scenarios)].reset_index(drop=True)
idx_dfs = []
for group_key, df in data_df.groupby(scenario_set_col):
df = df.sort_values(date_col)
# As range is exclusive of last element, we need to add 1 to df.index[-1], to make it inclusive
idx_ranges = prepare_index_rages(df.index[0], df.index[-1], min_feature_steps,
max_feature_steps, target_steps)
if len(idx_ranges) > 0:
idx_df = pd.DataFrame(idx_ranges, columns=[feature_start_idx_col, feature_end_idx_col,
total_end_idx_col, feature_steps_col])
idx_df[cluster_col] = df.loc[df.index[0], cluster_col]
idx_df[day_start_idx_col] = df.index[0]
idx_df[scenario_set_col] = group_key
idx_dfs.append(idx_df)
df_idx = pd.concat(idx_dfs, axis=0).reset_index(drop=True)
df_idx = filter_data_with_images(data_df[[img_index_col]], df_idx)
df_idx[index_col] = df_idx.index
data_df = data_df.set_index(date_col)
return data_df, df_idx[idx_df_cols]
def compute_target_std_for_input_duration(df_target, df_idx):
def compute_target_std(row):
feature_start = int(row[feature_start_idx_col])
feature_end = int(row[feature_end_idx_col])
return df_target[feature_start:feature_end].std()
return df_idx.apply(compute_target_std, axis=1)
def boosting_data_based_target_std(data_df, idx_df, factor=0.2):
    target_std = compute_target_std_for_input_duration(data_df[target_col], idx_df)
    probabilities = softmax(np.divide(target_std, np.max(target_std)))
    final_indexes = idx_df.index.values
    boosted_indexes = np.random.choice(probabilities.shape[0], int(probabilities.shape[0]*factor), p=probabilities)
    final_indexes = np.append(final_indexes, boosted_indexes)
    return idx_df.loc[final_indexes].reset_index(drop=True)
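# Rationale: windows whose target (irradiance) fluctuates more get a higher sampling probability
# (softmax of the normalised stddev), so volatile, cloudier periods are over-represented in the
# boosted training index relative to clear-sky periods.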
train_df_scenarios = train_df_cleaned[scenario_set_col].unique()
test_df_scenarios = test_df_cleaned[scenario_set_col].unique()
test_size = 0.3
other_scenarios, val_scenarios = train_test_split(test_df_scenarios, test_size=test_size)
other_scenarios, test_scenarios = train_test_split(other_scenarios, test_size=test_size/(1-test_size))
train_scenarios = np.append(train_df_scenarios, other_scenarios)
print("scenarios", len(train_scenarios), len(val_scenarios), len(test_scenarios))
combined_df = pd.concat([train_df_cleaned, test_df_cleaned], axis=0).reset_index(drop=True)
train_data_df, train_idx_df = prepare_dfs_for_datasets(combined_df, train_scenarios)
val_data_df, val_idx_df = prepare_dfs_for_datasets(test_df_cleaned, val_scenarios)
test_data_df, test_idx_df = prepare_dfs_for_datasets(test_df_cleaned, test_scenarios)
train_idx_df_boosted = boosting_data_based_target_std(train_data_df, train_idx_df, factor=0.3)
train_idx_df_boosted = train_idx_df_boosted.sample(frac=1.0).reset_index(drop=True)
train_idx_df_boosted[index_col] = train_idx_df_boosted.index
val_idx_df[index_col] = val_idx_df.index
test_idx_df[index_col] = test_idx_df.index
ts_scaler = MinMaxScaler()
scaled_column_types = ['float16', 'float32', 'float64']
scaled_columns = list(train_data_df.select_dtypes(include=scaled_column_types).columns)
ts_scaler.fit(train_data_df[scaled_columns])
train_data_df_scaled = pd.DataFrame(ts_scaler.transform(train_data_df[scaled_columns]), columns=scaled_columns)
val_data_df_scaled = pd.DataFrame(ts_scaler.transform(val_data_df[scaled_columns]), columns=scaled_columns)
test_data_df_scaled = pd.DataFrame(ts_scaler.transform(test_data_df[scaled_columns]), columns=scaled_columns)
print("train", train_data_df.shape, train_data_df_scaled.shape, train_idx_df.shape, train_idx_df_boosted.shape)
print("val", val_data_df.shape, val_data_df_scaled.shape, val_idx_df.shape)
print("test", test_data_df.shape, test_data_df_scaled.shape, test_idx_df.shape)
train_data_df.head()
train_data_df_scaled.head()
###Output
_____no_output_____
###Markdown
Each record in the index dataframe becomes one training example:
* Feature start to feature end becomes the features, with shape (input steps, features).
* Feature end to total end becomes the target, with shape (output steps, target).
A minimal sketch of this slicing is shown below.
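The sketch below is illustrative only; it relies on the column-name constants defined earlier (e.g. `feature_start_idx_col`) and on the dataframes created above:

```python
# Illustrative only: slice the time-series features and the target for one index row
row = train_idx_df_boosted.iloc[0]
features = train_data_df_scaled[time_series_cols].iloc[int(row[feature_start_idx_col]):int(row[feature_end_idx_col])]
target = train_data_df[target_col].iloc[int(row[feature_end_idx_col]):int(row[total_end_idx_col])]
print(features.shape, target.shape)  # -> (INPUT_STEPS, len(time_series_cols)) and (OUTPUT_STEPS,)
```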
###Code
train_idx_df_boosted.head()
###Output
_____no_output_____
###Markdown
Tensorflow Datasets Creation
###Code
def extract_image(idx):
return all_images_tensor[idx]
def create_single_image_dataset(img_df, idx_df, img_col_index, cluster_id=None,):
img_df_tensor = tf.cast(tf.convert_to_tensor(img_df[img_index_col].values), tf.int32)
def extract_image_idx(idx_row):
return img_df_tensor[idx_row[img_col_index]]
ds = create_cluster_idx_dataset(idx_df, cluster_id=cluster_id)
ds = ds.map(extract_image_idx).map(extract_image)
return ds
def create_multi_images_dataset(img_df, idx_df, start_index, end_index, cluster_id=None):
img_df_tensor = tf.cast(tf.convert_to_tensor(img_df[img_index_col].values), tf.int32)
def extract_images(image_indexes):
return tf.map_fn(extract_image, image_indexes, fn_output_signature=tf.float32)
def extract_image_indexes(idx_row):
image_indexes = img_df_tensor[idx_row[start_index]:idx_row[end_index]]
# image_indexes, _ = tf.unique(image_indexes)
valid_indexes_mask = tf.map_fn(lambda idx: idx != img_unavailable, image_indexes,
fn_output_signature=tf.bool)
return tf.boolean_mask(image_indexes, valid_indexes_mask)
ds = create_cluster_idx_dataset(idx_df, cluster_id=cluster_id)
ds = ds.map(extract_image_indexes).map(extract_images)
return ds
def create_cluster_idx_dataset(idx_df, cluster_id=None):
ds = tf.data.Dataset.from_tensor_slices(idx_df.values)
ds = ds.map(lambda elem: tf.cast(elem, tf.int32))
if cluster_id is not None:
ds = ds.filter(lambda elem: elem[cluster_idx] == cluster_id)
return ds
def create_time_series_dataset(ts_df, idx_df, end_index, start_index=None, steps=None,
cluster_id=None, reshape=None):
if start_index is None and steps is None:
raise ValueError("either start_index or steps required")
# bucket_bounderies = [13, 16, 19, 22, 25, 28, 31, 34]
ts_tensor = tf.convert_to_tensor(ts_df.values)
def extract_ts_records(row):
feature_start = row[start_index] if start_index is not None else row[end_index]-steps
features = ts_tensor[feature_start:row[end_index]]
return features
def extract_ts_records_for_batch(rows):
return tf.map_fn(extract_ts_records, rows, fn_output_signature=tf.float64)
ds = create_cluster_idx_dataset(idx_df, cluster_id=cluster_id)
# Enable this for variable batch size
# ds = ds.apply(tf.data.experimental.bucket_by_sequence_length(
# element_length_func=lambda elem: elem[-2],
# bucket_boundaries=bucket_bounderies,
# bucket_batch_sizes=np.repeat(batch_size, len(bucket_bounderies)+1)))
ds = ds.map(extract_ts_records)
if reshape is not None:
ds = ds.map(lambda elem: tf.reshape(elem, reshape))
return ds
# Creates aggregated features like mean and standard deviation
def create_agg_dataset(scaled, idx_df, cluster_id=None):
ts_tensor = tf.convert_to_tensor(scaled[target_col].values)
def compute_agg_features(idx_row):
day_ts = ts_tensor[idx_row[day_start_idx]:idx_row[feature_end_idx]]
input_steps_ts = ts_tensor[idx_row[feature_start_idx]:idx_row[feature_end_idx]]
day_mean = tf.math.reduce_mean(day_ts)
day_std = tf.math.reduce_std(day_ts)
input_steps_mean = tf.math.reduce_mean(input_steps_ts)
input_steps_std = tf.math.reduce_std(input_steps_ts)
return tf.convert_to_tensor([day_mean, day_std, input_steps_mean, input_steps_std])
ds = create_cluster_idx_dataset(idx_df, cluster_id)
ds = ds.map(compute_agg_features)
return ds
def create_features_dataset(scaled_df, data_df, idx_df, cluster_id=None):
ts_ds = create_time_series_dataset(scaled_df[time_series_cols], idx_df, feature_end_idx,
start_index=feature_start_idx, cluster_id=cluster_id)
img_ds = create_single_image_dataset(data_df[img_cols], idx_df, feature_end_idx, cluster_id=cluster_id)
# img_ds = create_multi_images_dataset(img_df, idx_df, feature_start_idx, feature_end_idx, cluster_id)
agg_ds = create_agg_dataset(scaled_df, idx_df, cluster_id=cluster_id)
# If input steps != output steps this won't work, this needs to be fixed
target_dates_ds = create_time_series_dataset(scaled_df[target_date_cols], idx_df, feature_end_idx,
steps=OUTPUT_STEPS, cluster_id=cluster_id)
return tf.data.Dataset.zip(((ts_ds, img_ds, agg_ds, target_dates_ds),))
def create_dataset_with_labels(scaled_df, data_df, idx_df, cluster_id=None, batch_size=32):
features_ds = create_features_dataset(scaled_df, data_df, idx_df, cluster_id=cluster_id)
target_ds = create_time_series_dataset(data_df[target_col], idx_df, total_end_idx, steps=OUTPUT_STEPS,
cluster_id=cluster_id, reshape=(OUTPUT_STEPS,))
return tf.data.Dataset.zip((features_ds, target_ds)).batch(batch_size).prefetch(tf.data.AUTOTUNE)
###Output
_____no_output_____
###Markdown
Image Dataset
###Code
for elem in create_single_image_dataset(train_data_df[img_cols], train_idx_df_boosted,
feature_end_idx).batch(1).take(1):
print(elem.shape)
###Output
(1, 40, 40, 3)
###Markdown
Time series Dataset
###Code
for elem in create_time_series_dataset(train_data_df[time_series_cols], train_idx_df_boosted,
feature_end_idx, start_index=feature_start_idx).batch(1).take(1):
print(elem.shape)
###Output
(1, 12, 20)
###Markdown
All features dataset
###Code
for elem in create_features_dataset(train_data_df_scaled, train_data_df, train_idx_df_boosted).batch(1).take(1):
for item in elem[0]:
print(item.shape)
###Output
(1, 12, 20)
(1, 40, 40, 3)
(1, 4)
(1, 12, 2)
###Markdown
Prepare target data for computing individual predictions. This is for analyzing the prediction errors with respect to input features.
###Code
train_target_ts_data = np.array(list(create_time_series_dataset(train_data_df[target_col],
train_idx_df_boosted, total_end_idx,
steps=OUTPUT_STEPS,
reshape=(OUTPUT_STEPS,)).as_numpy_iterator()))
val_target_ts_data = np.array(list(create_time_series_dataset(val_data_df[target_col], val_idx_df,
total_end_idx, steps=OUTPUT_STEPS,
reshape=(OUTPUT_STEPS,)).as_numpy_iterator()))
test_target_ts_data = np.array(list(create_time_series_dataset(test_data_df[target_col], test_idx_df,
total_end_idx, steps=OUTPUT_STEPS,
reshape=(OUTPUT_STEPS,)).as_numpy_iterator()))
train_ds = create_dataset_with_labels(train_data_df_scaled, train_data_df, train_idx_df_boosted)
val_ds = create_dataset_with_labels(val_data_df_scaled, val_data_df, val_idx_df)
test_ds = create_dataset_with_labels(test_data_df_scaled, test_data_df, test_idx_df)
print("training target:", train_target_ts_data.shape)
print("training dataset:", train_ds)
# Model prediction error increases with the time step in the target, so we use a
# weighted MAE so that the model focuses on reducing the error for later time steps.
def weighted_mae(y_true, y_pred):
    loss = tf.abs(tf.subtract(y_pred, y_true)) # (batch_size, OUTPUT_STEPS)
    # multiply by weights that grow with the time-step index (broadcast over the batch dimension)
    loss = loss * tf.nn.softmax(tf.linspace(0.0, 0.5, OUTPUT_STEPS))
    # sum the weighted errors over the time-step axis, giving one loss value per sample
    loss = tf.reduce_sum(loss, axis=1) # (batch_size,)
return loss
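# Quick illustration (assumes OUTPUT_STEPS defined above): the softmax weights grow with the
# step index, so later steps contribute more; since the weights sum to 1, an all-ones error
# gives a weighted loss of 1.0 per sample.
print(tf.nn.softmax(tf.linspace(0.0, 0.5, OUTPUT_STEPS)).numpy())
print(weighted_mae(tf.zeros((2, OUTPUT_STEPS)), tf.ones((2, OUTPUT_STEPS))).numpy())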
def compile_and_fit(model, train_ds, val_ds, model_path, epochs=5, patience=3):
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min')
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.1, patience=patience,
mode='min', min_delta=0.0001, cooldown=0, min_lr=0.0001)
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=model_path, monitor='val_loss',
mode='min', save_weights_only=True,
save_best_only=True)
model.compile(loss=weighted_mae, optimizer=tf.keras.optimizers.Adamax(learning_rate=0.01))
return model.fit(train_ds, epochs=epochs, validation_data=val_ds,
callbacks=[early_stopping, model_checkpoint_callback, reduce_lr])
def create_conv_lstm_model(ix=img_x, iy=img_y):
cnn_lstm_input = tf.keras.Input(shape=(INPUT_STEPS, ix, iy, 3), name="img")
img_features = tf.keras.layers.ConvLSTM2D(32, 3, activation="relu",
return_sequences=True)(cnn_lstm_input)
img_features = tf.keras.layers.MaxPooling3D(pool_size=(1, 2, 2))(img_features)
img_features = tf.keras.layers.ConvLSTM2D(16, 3, activation="relu",
return_sequences=True)(img_features)
img_features = tf.keras.layers.MaxPooling3D(pool_size=(1, 2, 2))(img_features)
img_features = tf.keras.layers.ConvLSTM2D(16, 3, activation="relu",
return_sequences=True)(img_features)
img_features = tf.keras.layers.MaxPooling3D(pool_size=(1, 4, 4))(img_features)
cnn_lstm_output = tf.keras.layers.Reshape((INPUT_STEPS, -1))(img_features)
return tf.keras.Model(inputs=[cnn_lstm_input], outputs=[cnn_lstm_output], name="cnn-lstm")
def create_multi_conv_model(ix=img_x, iy=img_y):
cnn_input = tf.keras.Input(shape=(INPUT_STEPS, ix, iy, 3), name="img")
img_features = tf.keras.layers.Conv2D(32, 3, activation="relu")(cnn_input)
img_features = tf.keras.layers.MaxPooling3D(pool_size=(1, 2, 2))(img_features)
img_features = tf.keras.layers.Conv2D(16, 3, activation="relu")(img_features)
img_features = tf.keras.layers.MaxPooling3D(pool_size=(1, 2, 2))(img_features)
img_features = tf.keras.layers.Conv2D(16, 3, activation="relu")(img_features)
img_features = tf.keras.layers.MaxPooling3D(pool_size=(1, 4, 4))(img_features)
cnn_output = tf.keras.layers.Reshape((INPUT_STEPS, -1))(img_features)
return tf.keras.Model(inputs=[cnn_input], outputs=[cnn_output], name="cnn-multi")
def create_single_conv_model(ix=img_x, iy=img_y):
cnn_input = tf.keras.Input(shape=(ix, iy, 3), name="img")
img_features = tf.keras.layers.Conv2D(32, 3, activation="relu")(cnn_input)
img_features = tf.keras.layers.MaxPooling2D(2)(img_features)
img_features = tf.keras.layers.Conv2D(16, 3, activation="relu")(img_features)
img_features = tf.keras.layers.MaxPooling2D(2)(img_features)
img_features = tf.keras.layers.Conv2D(8, 3, activation="relu")(img_features)
cnn_output = tf.keras.layers.GlobalMaxPooling2D()(img_features)
return tf.keras.Model(inputs=[cnn_input], outputs=[cnn_output], name="cnn")
def create_model(ix=img_x, iy=img_y):
# img_input = tf.keras.Input(shape=(INPUT_STEPS, ix, iy, 3), name="images")
img_input = tf.keras.Input(shape=(ix, iy, 3), name="image_data")
ts_input = tf.keras.Input(shape=(INPUT_STEPS, len(time_series_cols)), name='weather_data')
agg_input = tf.keras.Input(shape=(4,), name='aggregate_features')
target_dates_input = tf.keras.Input(shape=(OUTPUT_STEPS, len(target_date_cols)), name='target_time')
img_output = create_single_conv_model(ix, iy)(img_input)
# img_output = create_conv_lstm_model(ix, iy)(img_input)
# img_output = create_multi_conv_model(ix, iy)(img_input)
ts_features = tf.keras.layers.LSTM(32, return_sequences=False, name='input_time_series_processing')(ts_input)
all_features = tf.keras.layers.Concatenate(name='all_input_features')([ts_features, img_output, agg_input])
all_features = tf.keras.layers.RepeatVector(OUTPUT_STEPS, name='time_series_expansion')(all_features)
all_features = tf.keras.layers.Concatenate(name='target_time_association')([all_features, target_dates_input])
all_features = tf.keras.layers.LSTM(128, return_sequences=False, name='output_time_series_processing')(all_features)
all_features = tf.keras.layers.Dense(128, activation='relu')(all_features)
final_output = tf.keras.layers.Dense(OUTPUT_STEPS, name='output')(all_features)
model = tf.keras.Model(inputs=[ts_input, img_input, agg_input, target_dates_input], outputs=[final_output])
return model
models_location = os.path.join(data_path, "models", "ts-images-agg-td")
base_model_location = os.path.join(models_location, "base")
def train_cluster(cluster_id, base_model=None, base_model_location=None):
print("training for", cluster_id)
model = create_model()
cluster_model_location = os.path.join(models_location, "cluster-{}".format(cluster_id))
model_path = os.path.join(cluster_model_location, "checkpoint")
if base_model is not None:
model.set_weights(base_model.get_weights())
elif base_model_location is not None:
model.load_weights(tf.train.latest_checkpoint(base_model_location))
cluster_train_ds = create_dataset_with_labels(train_data_df_scaled, train_data_df,
train_idx_df_boosted, cluster_id=cluster_id)
cluster_val_ds = create_dataset_with_labels(val_data_df_scaled, val_data_df,
val_idx_df, cluster_id=cluster_id)
compile_and_fit(model, cluster_train_ds, cluster_val_ds, model_path, epochs=100, patience=5)
best_model = create_model()
best_model.load_weights(tf.train.latest_checkpoint(cluster_model_location))
best_model.compile(loss='mae', optimizer='Adamax')
clear_output()
return best_model
def train_clusters(base_model=None, base_model_location=None):
cluster_models = []
for cluster_id in range(num_clusters):
cluster_models.append(train_cluster(cluster_id, base_model=base_model,
base_model_location=base_model_location))
return cluster_models
# Predictions should be non-negative, but the model sometimes outputs negative values,
# so we treat those as missing and fill them from the neighbouring predicted steps.
def forward_fill_predictions(predictions):
predictions[predictions < 0] = np.nan
df = pd.DataFrame(predictions).fillna(method="ffill", axis=1).fillna(method="bfill", axis=1).fillna(0)
return df.values
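# Tiny illustration (made-up numbers): negative predictions are treated as missing and
# filled from neighbouring steps of the same sample.
example_preds = np.array([[-1.0, 5.0, 7.0],
                          [3.0, -2.0, -4.0]])
print(forward_fill_predictions(example_preds))
# -> [[5. 5. 7.]
#     [3. 3. 3.]]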
def predict_with_base_model(model, ts_ds, idx_df):
    predictions = model.predict(ts_ds, verbose=0)
predictions = forward_fill_predictions(predictions)
final_predictions = np.hstack([idx_df[[index_col, cluster_col]].values, predictions])
return final_predictions[final_predictions[:, 0].argsort()]
def predict_with_single_cluster_model(models, scaled_df, data_df, idx_df):
all_predictions = []
for cluster_id, model in enumerate(models):
feature_ds = create_features_dataset(scaled_df, data_df, idx_df, cluster_id).batch(32)
predictions = forward_fill_predictions(model.predict(feature_ds, verbose=0))
cluster_idx_arr = idx_df[idx_df[cluster_col]==cluster_id][[index_col, cluster_col]].values
all_predictions.append(np.hstack([cluster_idx_arr, predictions]))
final_predictions = np.vstack(all_predictions)
return final_predictions[final_predictions[:, 0].argsort()]
tf.keras.utils.plot_model(create_model(), to_file="model.png")
###Output
_____no_output_____
###Markdown
**Model Training** We used a combination of LSTM (long short-term memory), CNN, and dense layers to train the model.
###Code
%%time
# Due to random initialization, results will be different for every training cycle,
# so it is a good idea to run training for several cycles and check the consistency
# of the results.
def train_models(epochs=100, cycles=5):
models = []
for i in range(cycles):
print("Model training cycle", i)
model = create_model()
iter_model_location = os.path.join(base_model_location, "iteration-{}".format(i))
model_path = os.path.join(iter_model_location, "checkpoint")
compile_and_fit(model, train_ds, val_ds, model_path, epochs=epochs, patience=3)
iter_best_model = create_model()
iter_best_model.load_weights(tf.train.latest_checkpoint(iter_model_location))
iter_best_model.compile(loss='mae', optimizer='Adamax')
clear_output()
models.append((iter_best_model, iter_model_location))
return models
# Run with epochs=30 for better results
iter_models = train_models(epochs=1, cycles=1)
base_best_model, base_best_model_location = iter_models[0]
# cluster_models = train_clusters(base_model=base_best_model)
###Output
CPU times: user 1min 54s, sys: 21.8 s, total: 2min 16s
Wall time: 1min 25s
###Markdown
Results:
###Code
%%time
train_preds_base_model = predict_with_base_model(base_best_model, train_ds, train_idx_df_boosted)
val_preds_base_model = predict_with_base_model(base_best_model, val_ds, val_idx_df)
test_preds_base_model = predict_with_base_model(base_best_model, test_ds, test_idx_df)
train_errors = np.abs(np.subtract(train_target_ts_data, train_preds_base_model[:, -OUTPUT_STEPS:]))
val_errors = np.abs(np.subtract(val_target_ts_data, val_preds_base_model[:, -OUTPUT_STEPS:]))
test_errors = np.abs(np.subtract(test_target_ts_data, test_preds_base_model[:, -OUTPUT_STEPS:]))
print("Train loss:", "with base=", train_errors.mean())
print("Val loss:", "with base=", val_errors.mean())
print("Test loss:", "with base=", test_errors.mean())
# train_errors_df = pd.concat([train_idx_df, pd.DataFrame(train_errors, columns=pred_df_cols)], axis=1)
# val_errors_df = pd.concat([val_idx_df, pd.DataFrame(val_errors, columns=pred_df_cols)], axis=1)
# test_errors_df = pd.concat([test_idx_df, pd.DataFrame(test_errors, columns=pred_df_cols)], axis=1)
# train_preds_cluster_models = predict_with_single_cluster_model(cluster_models, train_data_df_scaled,
# train_data_df, train_idx_df_boosted)
# val_preds_cluster_models = predict_with_single_cluster_model(cluster_models, val_data_df_scaled,
# val_data_df, val_idx_df)
# test_preds_cluster_models = predict_with_single_cluster_model(cluster_models, test_data_df_scaled,
# test_data_df, test_idx_df)
# print("Train loss:", "with base=", train_errors.mean(),
# "with cluster=", mean_absolute_error(train_target_ts_data, train_preds_cluster_models[:, -OUTPUT_STEPS:]))
# print("Val loss:", "with base=", val_errors.mean(),
# "with cluster=", mean_absolute_error(val_target_ts_data, val_preds_cluster_models[:, -OUTPUT_STEPS:]))
# print("Test loss:", "with base=", test_errors.mean(),
# "with cluster=", mean_absolute_error(test_target_ts_data, test_preds_cluster_models[:, -OUTPUT_STEPS:]))
print("\n")
###Output
Train loss: with base= 105.11174137602481
Val loss: with base= 96.7350869157766
Test loss: with base= 89.78371596670247
CPU times: user 43.3 s, sys: 11.5 s, total: 54.8 s
Wall time: 34.5 s
|
05.NLP_TextMining/03_Sentence_Classification_with_BERT.ipynb | ###Markdown
03 - Sentence Classification with BERT
**Status: Work in progress. Check back later.**
In this notebook, we will use a pre-trained deep learning model to process some text. We will then use the output of that model to classify the text. The text is a list of sentences from film reviews, and we will classify each sentence as either speaking "positively" or "negatively" about its subject.
Models
The classification model we will use is logistic regression using the Scikit Learn library. The deep learning model is distilBERT, a high-performance version of the latest cutting-edge NLP model. We will use the implementation from the [huggingface transformers library](https://huggingface.co/).
Dataset
The dataset we will use in this example is [SST2](https://nlp.stanford.edu/sentiment/index.html), which contains sentences from movie reviews, each labeled as either positive (has the value 1) or negative (has the value 0):
| sentence | label |
| --- | --- |
| a stirring , funny and finally transporting re imagining of beauty and the beast and 1930s horror films | 1 |
| apparently reassembled from the cutting room floor of any given daytime soap | 0 |
| they presume their audience won't sit still for a sociology lesson | 0 |
| this is a visually stunning rumination on love , memory , history and the war between art and commerce | 1 |
| jonathan parker 's bartleby should have been the be all end all of the modern office anomie films | 1 |
Installing the transformers library
Let's start by installing the huggingface transformers library so we can load our deep learning NLP model.
###Code
!pip install transformers
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import torch
import transformers as ppb
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Importing the dataset
We'll use pandas to read the dataset and load it into a dataframe.
###Code
df = pd.read_csv('https://github.com/clairett/pytorch-sentiment-classification/raw/master/data/SST2/train.tsv', delimiter='\t', header=None)
###Output
_____no_output_____
###Markdown
For performance reasons, we'll only use 2,000 sentences from the dataset
###Code
batch_1 = df[:2000]
###Output
_____no_output_____
###Markdown
We can ask pandas how many sentences are labeled as "positive" (value 1) and how many are labeled "negative" (having the value 0)
###Code
batch_1[1].value_counts()
###Output
_____no_output_____
###Markdown
Loading the Pre-trained BERT model
Let's now load a pre-trained BERT model.
###Code
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
# Load pretrained model/tokenizer
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
###Output
Downloading: 100%|██████████| 442/442 [00:00<00:00, 198kB/s]
Downloading: 100%|██████████| 268M/268M [04:36<00:00, 970kB/s]
###Markdown
Right now, the variable `model` holds a pretrained distilBERT model -- a version of BERT that is smaller, but much faster and requiring a lot less memory.
Model 1: Preparing the Dataset
Before we can hand our sentences to BERT, we need to do some minimal processing to put them in the format it requires.
Tokenization
Our first step is to tokenize the sentences -- break them up into words and subwords in the format BERT is comfortable with.
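As a quick, illustrative aside (not part of the original walkthrough), you can inspect how a single sentence is split into subword pieces and mapped to ids; `encode` with `add_special_tokens=True` also adds the special `[CLS]` and `[SEP]` tokens:

```python
# Illustrative only: inspect how one sentence is tokenized and encoded
example = "a visually stunning rumination on love"
print(tokenizer.tokenize(example))
print(tokenizer.encode(example, add_special_tokens=True))
```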
###Code
tokenized = batch_1[0].apply((lambda x: tokenizer.encode(x, add_special_tokens=True)))
###Output
_____no_output_____
###Markdown
Padding
After tokenization, `tokenized` is a list of sentences -- each sentence is represented as a list of tokens. We want BERT to process our examples all at once (as one batch). It's just faster that way. For that reason, we need to pad all lists to the same size, so we can represent the input as one 2-d array, rather than a list of lists (of different lengths).
###Code
tokenized.shape
len(tokenized[0])
max_len = 0
for i in tokenized.values:
if len(i) > max_len:
max_len = len(i)
padded = np.array([i + [0]*(max_len-len(i)) for i in tokenized.values])
###Output
_____no_output_____
###Markdown
Our dataset is now in the `padded` variable; we can view its dimensions below:
###Code
np.array(padded).shape
###Output
_____no_output_____
###Markdown
Masking
If we directly send `padded` to BERT, that would slightly confuse it. We need to create another variable to tell it to ignore (mask) the padding we've added when it's processing its input. That's what attention_mask is:
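For instance, on a toy padded row (made-up token ids), the mask is 1 wherever there is a real token and 0 over the padding:

```python
# Illustrative only: build a mask for a toy padded batch
toy_padded = np.array([[101, 2023, 3185, 102, 0, 0]])
print(np.where(toy_padded != 0, 1, 0))  # -> [[1 1 1 1 0 0]]
```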
###Code
attention_mask = np.where(padded != 0, 1, 0)
attention_mask.shape
###Output
_____no_output_____
###Markdown
Model 1: And Now, Deep Learning!
Now that we have our model and inputs ready, let's run our model!
The `model()` function runs our sentences through BERT. The results of the processing will be returned into `last_hidden_states`.
###Code
input_ids = torch.tensor(padded)
attention_mask = torch.tensor(attention_mask)
with torch.no_grad():
last_hidden_states = model(input_ids, attention_mask=attention_mask)
###Output
_____no_output_____
###Markdown
Let's slice only the part of the output that we need. That is the output corresponding to the first token of each sentence. The way BERT does sentence classification is that it adds a token called `[CLS]` (for classification) at the beginning of every sentence. The output corresponding to that token can be thought of as an embedding for the entire sentence. We'll save those in the `features` variable, as they'll serve as the features for our logistic regression model.
###Code
features = last_hidden_states[0][:,0,:].numpy()
###Output
_____no_output_____
###Markdown
The labels indicating whether each sentence is positive or negative now go into the `labels` variable.
###Code
labels = batch_1[1]
labels.shape
labels[:5]
###Output
_____no_output_____
###Markdown
Model 2: Train/Test Split
Let's now split our dataset into a training set and a testing set (even though we're using 2,000 sentences from the SST2 training set).
###Code
train_features, test_features, train_labels, test_labels = train_test_split(features, labels)
###Output
_____no_output_____
###Markdown
[Bonus] Grid Search for Parameters
We can dive into Logistic regression directly with the Scikit Learn default parameters, but sometimes it's worth searching for the best value of the C parameter, which determines regularization strength.
###Code
# parameters = {'C': np.linspace(0.0001, 100, 20)}
# grid_search = GridSearchCV(LogisticRegression(), parameters)
# grid_search.fit(train_features, train_labels)
# print('best parameters: ', grid_search.best_params_)
# print('best scrores: ', grid_search.best_score_)
###Output
_____no_output_____
###Markdown
We now train the LogisticRegression model. If you've chosen to do the gridsearch, you can plug the value of C into the model declaration (e.g. `LogisticRegression(C=5.2)`).
###Code
lr_clf = LogisticRegression()
lr_clf.fit(train_features, train_labels)
###Output
_____no_output_____
###Markdown
Evaluating Model 2
So how well does our model do in classifying sentences? One way is to check the accuracy against the testing dataset:
###Code
lr_clf.score(test_features, test_labels)
###Output
_____no_output_____
###Markdown
How good is this score? What can we compare it against? Let's first look at a dummy classifier:
###Code
from sklearn.dummy import DummyClassifier
clf = DummyClassifier()
scores = cross_val_score(clf, train_features, train_labels)
print("Dummy classifier score: %0.3f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
###Output
Dummy classifier score: 0.515 (+/- 0.04)
|
week3/qlearning.ipynb | ###Markdown
Q-learning
This notebook will guide you through the implementation of the vanilla Q-learning algorithm.
You need to implement QLearningAgent (follow the instructions for each method) and use it on a number of tests below.
###Code
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py
!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week3_model_free/submit.py
!touch .setup_complete
# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from collections import defaultdict
import random
import math
import numpy as np
class QLearningAgent:
def __init__(self, alpha, epsilon, discount, get_legal_actions):
"""
Q-Learning Agent
based on https://inst.eecs.berkeley.edu/~cs188/sp19/projects.html
Instance variables you have access to
- self.epsilon (exploration prob)
- self.alpha (learning rate)
- self.discount (discount rate aka gamma)
Functions you should use
- self.get_legal_actions(state) {state, hashable -> list of actions, each is hashable}
which returns legal actions for a state
- self.get_qvalue(state,action)
which returns Q(state,action)
- self.set_qvalue(state,action,value)
which sets Q(state,action) := value
!!!Important!!!
Note: please avoid using self._qValues directly.
There's a special self.get_qvalue/set_qvalue for that.
"""
self.get_legal_actions = get_legal_actions
self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))
self.alpha = alpha
self.epsilon = epsilon
self.discount = discount
def get_qvalue(self, state, action):
""" Returns Q(state,action) """
return self._qvalues[state][action]
def set_qvalue(self, state, action, value):
""" Sets the Qvalue for [state,action] to the given value """
self._qvalues[state][action] = value
#---------------------START OF YOUR CODE---------------------#
def get_value(self, state):
"""
Compute your agent's estimate of V(s) using current q-values
V(s) = max_over_action Q(state,action) over possible actions.
Note: please take into account that q-values can be negative.
"""
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return 0.0
if len(possible_actions) == 0:
return 0.0
value = max([self.get_qvalue(state, action) for action in possible_actions])
return value
def update(self, state, action, reward, next_state):
"""
You should do your Q-Value update here:
Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))
"""
# agent parameters
gamma = self.discount
learning_rate = self.alpha
ini_qvalue = self.get_qvalue(state, action)
new_qvalue = (1 - learning_rate) * ini_qvalue + learning_rate * (reward + gamma * self.get_value(next_state))
self.set_qvalue(state, action, new_qvalue)
def get_best_action(self, state):
"""
Compute the best action to take in a state (using current q-values).
"""
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return None
if len(possible_actions) == 0:
return None
action_values = {action: self.get_qvalue(state, action) for action in possible_actions}
best_action = sorted(action_values, key=lambda x: action_values[x], reverse=True)[0]
return best_action
def get_action(self, state):
"""
Compute the action to take in the current state, including exploration.
With probability self.epsilon, we should take a random action.
otherwise - the best policy action (self.get_best_action).
Note: To pick randomly from a list, use random.choice(list).
To pick True or False with a given probablity, generate uniform number in [0, 1]
and compare it with your probability
"""
# Pick Action
possible_actions = self.get_legal_actions(state)
action = None
# If there are no legal actions, return None
if len(possible_actions) == 0:
return None
# agent parameters:
epsilon = self.epsilon
exploration = random.random()
if (exploration < epsilon):
chosen_action = np.random.choice(possible_actions)
else:
chosen_action = self.get_best_action(state)
return chosen_action
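# Quick sanity check (illustrative, made-up transition): with alpha=0.5, gamma=0.9 and all
# q-values initialised to 0, a single update gives (1 - 0.5) * 0 + 0.5 * (1.0 + 0.9 * 0) = 0.5.
toy_agent = QLearningAgent(alpha=0.5, epsilon=0.1, discount=0.9,
                           get_legal_actions=lambda s: [0, 1])
toy_agent.update('s0', 0, 1.0, 's1')
print(toy_agent.get_qvalue('s0', 0))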
###Output
_____no_output_____
###Markdown
Try it on taxi
Here we use the Q-learning agent on the Taxi env from OpenAI Gym.
You will need to insert a few agent functions here.
###Code
import gym
try:
env = gym.make('Taxi-v3')
except gym.error.DeprecatedEnv:
# Taxi-v2 was replaced with Taxi-v3 in gym 0.15.0
env = gym.make('Taxi-v2')
n_actions = env.action_space.n
agent = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
def play_and_train(env, agent, t_max=10**4):
"""
This function should
- run a full game, actions given by agent's e-greedy policy
- train agent using agent.update(...) whenever it is possible
- return total reward
"""
total_reward = 0.0
s = env.reset()
for t in range(t_max):
# get agent to pick action given state s.
a = agent.get_action(s)
next_s, r, done, _ = env.step(a)
# train (update) agent for state s
agent.update(s, a, r, next_s)
s = next_s
total_reward += r
if done:
break
return total_reward
from IPython.display import clear_output
rewards = []
for i in range(1000):
rewards.append(play_and_train(env, agent))
agent.epsilon *= 0.99
if i % 100 == 0:
clear_output(True)
print('eps =', agent.epsilon, 'mean reward =', np.mean(rewards[-10:]))
plt.plot(rewards)
plt.show()
###Output
eps = 2.9191091959171894e-05 mean reward = 7.7
###Markdown
Submit to Coursera I: Preparation
###Code
submit_rewards1 = rewards.copy()
###Output
_____no_output_____
###Markdown
Binarized state spaces
Use the agent to train efficiently on `CartPole-v0`.
This environment has a continuous set of possible states, so you will have to group them into bins somehow.
The simplest way is to use `round(x,n_digits)` (or `np.round`) to round a real number to a given number of digits.
The tricky part is to get the `n_digits` right for each state dimension to train effectively.
Note that you don't need to convert states to integers, but to __tuples__ of any kind of values.
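As a small illustration (the digits per dimension are just one possible choice, not a recommendation), a raw observation can be turned into a coarse, hashable tuple like this:

```python
import numpy as np

raw_state = np.array([0.0312, -0.4567, 0.0213, 0.7654])  # a made-up CartPole observation
digits = (0, 1, 2, 1)                                     # hypothetical rounding per dimension
binned_state = tuple(round(float(x), d) for x, d in zip(raw_state, digits))
print(binned_state)  # -> (0.0, -0.5, 0.02, 0.8)
```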
###Code
env = gym.make("CartPole-v0")
n_actions = env.action_space.n
print("first state:%s" % (env.reset()))
plt.imshow(env.render('rgb_array'))
###Output
first state:[-0.04668313 -0.02412881 -0.02958455 0.00169593]
###Markdown
Play a few games
We need to estimate observation distributions. To do so, we'll play a few games and record all states.
###Code
all_states = []
for _ in range(1000):
all_states.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
# print(s)
all_states.append(s)
if done:
break
all_states = np.array(all_states)
for obs_i in range(env.observation_space.shape[0]):
plt.hist(all_states[:, obs_i], bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
Binarize environment
###Code
from gym.core import ObservationWrapper
class Binarizer(ObservationWrapper):
def observation(self, state):
# hint: you can do that with round(x,n_digits)
# you may pick a different n_digits for each dimension
state[0] = np.round(state[0], 0)
state[1] = np.round(state[1], 1)
state[2] = np.round(state[2], 2)
state[3] = np.round(state[3], 1)
return tuple(state)
env = Binarizer(gym.make("CartPole-v0").env)
all_states = []
for _ in range(1000):
all_states.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
all_states.append(s)
if done:
break
all_states = np.array(all_states)
for obs_i in range(env.observation_space.shape[0]):
plt.hist(all_states[:, obs_i], bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
Learn binarized policy
Now let's train a policy that uses the binarized state space.
__Tips:__
* If your binarization is too coarse, your agent may fail to find the optimal policy. In that case, change the binarization.
* If your binarization is too fine-grained, your agent will take much longer than 1000 steps to converge. You can either increase the number of iterations and decrease the epsilon decay, or change the binarization.
* Having $10^3$–$10^4$ distinct states is recommended (`len(QLearningAgent._qvalues)`), but not required.
* A reasonable agent should get to an average reward of at least 50.
###Code
agent = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
rewards = []
agent.epsilon = 0.15
for i in range(10000):
rewards.append(play_and_train(env, agent))
# OPTIONAL: <YOUR CODE: adjust epsilon>
if i % 100 == 0:
clear_output(True)
print('eps =', agent.epsilon, 'mean reward =', np.mean(rewards[-10:]))
plt.plot(rewards)
plt.show()
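# Optional check (see the tips above): how many distinct binarized states were visited,
# and the mean reward over the last 100 episodes.
print("distinct states:", len(agent._qvalues), " mean reward (last 100):", np.mean(rewards[-100:]))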
dir(QLearningAgent)
###Output
_____no_output_____
###Markdown
Submit to Coursera II: Submission
###Code
submit_rewards2 = rewards.copy()
from submit import submit_qlearning
submit_qlearning(submit_rewards1, submit_rewards2, '', '')
###Output
Submitted to Coursera platform. See results on assignment page!
###Markdown
Q-learning
This notebook will guide you through the implementation of the vanilla Q-learning algorithm.
You need to implement QLearningAgent (follow the instructions for each method) and use it on a number of tests below.
###Code
#XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
%%writefile qlearning.py
from collections import defaultdict
import random, math
import numpy as np
class QLearningAgent:
def __init__(self, alpha, epsilon, discount, get_legal_actions):
"""
Q-Learning Agent
based on http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
Instance variables you have access to
- self.epsilon (exploration prob)
- self.alpha (learning rate)
- self.discount (discount rate aka gamma)
Functions you should use
- self.get_legal_actions(state) {state, hashable -> list of actions, each is hashable}
which returns legal actions for a state
- self.get_qvalue(state,action)
which returns Q(state,action)
- self.set_qvalue(state,action,value)
which sets Q(state,action) := value
!!!Important!!!
Note: please avoid using self._qValues directly.
There's a special self.get_qvalue/set_qvalue for that.
"""
self.get_legal_actions = get_legal_actions
self._qvalues = defaultdict(lambda: defaultdict(lambda: 0))
self.alpha = alpha
self.epsilon = epsilon
self.discount = discount
def get_qvalue(self, state, action):
""" Returns Q(state,action) """
return self._qvalues[state][action]
def set_qvalue(self,state,action,value):
""" Sets the Qvalue for [state,action] to the given value """
self._qvalues[state][action] = value
#---------------------START OF YOUR CODE---------------------#
def get_value(self, state):
"""
Compute your agent's estimate of V(s) using current q-values
V(s) = max_over_action Q(state,action) over possible actions.
Note: please take into account that q-values can be negative.
"""
possible_actions = self.get_legal_actions(state)
#If there are no legal actions, return 0.0
if len(possible_actions) == 0:
return 0.0
#<YOUR CODE HERE>
else:
value = np.max([self.get_qvalue(state,i) for i in possible_actions])
return value
def update(self, state, action, reward, next_state):
"""
You should do your Q-Value update here:
Q(s,a) := (1 - alpha) * Q(s,a) + alpha * (r + gamma * V(s'))
"""
#agent parameters
gamma = self.discount
learning_rate = self.alpha
#<YOUR CODE HERE>
new_q_value = (1 - learning_rate) * self.get_qvalue(state, action) + learning_rate*(reward + (gamma*self.get_value(next_state)))
self.set_qvalue(state, action, new_q_value)
def get_best_action(self, state):
"""
Compute the best action to take in a state (using current q-values).
"""
possible_actions = self.get_legal_actions(state)
#If there are no legal actions, return None
if len(possible_actions) == 0:
return None
#<YOUR CODE HERE>
q_values = [self.get_qvalue(state,i) for i in possible_actions]
best_action = possible_actions[np.argmax(q_values)]
return best_action
def get_action(self, state):
"""
Compute the action to take in the current state, including exploration.
With probability self.epsilon, we should take a random action.
        otherwise - the best policy action (self.get_best_action).
Note: To pick randomly from a list, use random.choice(list).
To pick True or False with a given probablity, generate uniform number in [0, 1]
and compare it with your probability
"""
# Pick Action
possible_actions = self.get_legal_actions(state)
action = self.get_best_action(state)
#If there are no legal actions, return None
if len(possible_actions) == 0:
return None
#agent parameters:
epsilon = self.epsilon
#<YOUR CODE HERE>
ch = np.random.choice([0,1], p=[epsilon,1-epsilon])
if ch:
chosen_action = action
else:
chosen_action = np.random.choice(possible_actions)
return chosen_action
###Output
Overwriting qlearning.py
###Markdown
Try it on taxi
Here we use the Q-learning agent on the Taxi env from OpenAI Gym.
You will need to insert a few agent functions here.
###Code
import gym
env = gym.make("Taxi-v2")
n_actions = env.action_space.n
from qlearning import QLearningAgent
agent = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions = lambda s: range(n_actions))
def play_and_train(env,agent,t_max=10**4):
"""
This function should
- run a full game, actions given by agent's e-greedy policy
- train agent using agent.update(...) whenever it is possible
- return total reward
"""
total_reward = 0.0
s = env.reset()
for t in range(t_max):
# get agent to pick action given state s.
a = agent.get_action(s)
next_s, r, done, _ = env.step(a)
# train (update) agent for state s
agent.update(s, a, r, next_s)
s = next_s
total_reward +=r
if done: break
return total_reward
from IPython.display import clear_output
rewards = []
for i in range(1000):
rewards.append(play_and_train(env, agent))
agent.epsilon *= 0.99
if i %100 ==0:
clear_output(True)
print('eps =', agent.epsilon, 'mean reward =', np.mean(rewards[-10:]))
plt.plot(rewards)
plt.show()
###Output
eps = 2.9191091959171894e-05 mean reward = 8.0
###Markdown
Submit to Coursera I: Preparation
###Code
submit_rewards1 = rewards.copy()
###Output
_____no_output_____
###Markdown
Binarized state spaces
Use the agent to train efficiently on CartPole-v0.
This environment has a continuous set of possible states, so you will have to group them into bins somehow.
The simplest way is to use `round(x,n_digits)` (or numpy round) to round a real number to a given number of digits.
The tricky part is to get the n_digits right for each state dimension to train effectively.
Note that you don't need to convert states to integers, but to __tuples__ of any kind of values.
###Code
env = gym.make("CartPole-v0")
n_actions = env.action_space.n
print("first state:%s" % (env.reset()))
plt.imshow(env.render('rgb_array'))
###Output
first state:[ 0.03739972 0.03263197 -0.01945268 -0.01911927]
###Markdown
Play a few games
We need to estimate observation distributions. To do so, we'll play a few games and record all states.
###Code
all_states = []
for _ in range(1000):
all_states.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
all_states.append(s)
if done: break
all_states = np.array(all_states)
for obs_i in range(env.observation_space.shape[0]):
plt.hist(all_states[:, obs_i], bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
Binarize environment
###Code
from gym.core import ObservationWrapper
class Binarizer(ObservationWrapper):
def observation(self, state):
#state = <round state to some amount digits.>
#hint: you can do that with round(x,n_digits)
#you will need to pick a different n_digits for each dimension
state[0] = np.round(state[0], 0)
state[1] = np.round(state[1], 1)
state[2] = np.round(state[2], 2)
state[3] = np.round(state[3], 1)
return tuple(state)
env = Binarizer(gym.make("CartPole-v0"))
all_states = []
for _ in range(1000):
all_states.append(env.reset())
done = False
while not done:
s, r, done, _ = env.step(env.action_space.sample())
all_states.append(s)
if done: break
all_states = np.array(all_states)
for obs_i in range(env.observation_space.shape[0]):
plt.hist(all_states[:,obs_i],bins=20)
plt.show()
###Output
_____no_output_____
###Markdown
Learn binarized policy
Now let's train a policy that uses the binarized state space.
__Tips:__
* If your binarization is too coarse, your agent may fail to find the optimal policy. In that case, change the binarization.
* If your binarization is too fine-grained, your agent will take much longer than 1000 steps to converge. You can either increase the number of iterations and decrease the epsilon decay, or change the binarization.
* Having 10^3 ~ 10^4 distinct states is recommended (`len(QLearningAgent._qvalues)`), but not required.
###Code
agent = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions = lambda s: range(n_actions))
rewardsb = []
agent.epsilon = 0.15
for i in range(2000):
rewardsb.append(play_and_train(env,agent))
#OPTIONAL YOUR CODE: adjust epsilon
if i %100 ==0:
clear_output(True)
print('eps =', agent.epsilon, 'mean reward =', np.mean(rewardsb[-10:]))
plt.plot(rewardsb)
plt.show()
###Output
eps = 0.15 mean reward = 87.4
###Markdown
Submit to Coursera II: Submission
###Code
submit_rewards2 = rewardsb.copy()
from submit import submit_qlearning
EMAIL = "[email protected]"
TOKEN = "xx"
submit_qlearning(submit_rewards1, submit_rewards2, EMAIL, TOKEN)
###Output
Submitted to Coursera platform. See results on assignment page!
|
doc/LectureNotes/_build/jupyter_execute/chapter4.ipynb | ###Markdown
Logistic Regression[Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK3155/h20/forelesningsvideoer/LectureSeptember18.mp4?vrtx=view-as-webpage) Logistic RegressionIn linear regression our main interest was centered on learning thecoefficients of a functional fit (say a polynomial) in order to beable to predict the response of a continuous variable on some unseendata. The fit to the continuous variable $y_i$ is based on someindependent variables $x_i$. Linear regression resulted inanalytical expressions for standard ordinary Least Squares or Ridgeregression (in terms of matrices to invert) for several quantities,ranging from the variance and thereby the confidence intervals of theoptimal parameters $\hat{\beta}$ to the mean squared error. If we can invertthe product of the design matrices, linear regression gives then asimple recipe for fitting our data.Classification problems, however, are concerned with outcomes takingthe form of discrete variables (i.e. categories). We may for example,on the basis of DNA sequencing for a number of patients, like to findout which mutations are important for a certain disease; or based onscans of various patients' brains, figure out if there is a tumor ornot; or given a specific physical system, we'd like to identify itsstate, say whether it is an ordered or disordered system (typicalsituation in solid state physics); or classify the status of apatient, whether she/he has a stroke or not and many other similarsituations.The most common situation we encounter when we apply logisticregression is that of two possible outcomes, normally denoted as abinary outcome, true or false, positive or negative, success orfailure etc.Logistic regression will also serve as our stepping stone towardsneural network algorithms and supervised deep learning. For logisticlearning, the minimization of the cost function leads to a non-linearequation in the parameters $\hat{\beta}$. The optimization of theproblem calls therefore for minimization algorithms. This forms thebottle neck of all machine learning algorithms, namely how to findreliable minima of a multi-variable function. This leads us to thefamily of gradient descent methods. The latter are the working horsesof basically all modern machine learning algorithms.We note also that many of the topics discussed here on logistic regression are also commonly used in modern supervised Deep Learningmodels, as we will see later. BasicsWe consider the case where the dependent variables, also called theresponses or the outcomes, $y_i$ are discrete and only take valuesfrom $k=0,\dots,K-1$ (i.e. $K$ classes).The goal is to predict theoutput classes from the design matrix $\boldsymbol{X}\in\mathbb{R}^{n\times p}$made of $n$ samples, each of which carries $p$ features or predictors. Theprimary goal is to identify the classes to which new unseen samplesbelong.Let us specialize to the case of two classes only, with outputs$y_i=0$ and $y_i=1$. Our outcomes could represent the status of acredit card user that could default or not on her/his credit carddebt. That is $$y_i = \begin{bmatrix} 0 & \mathrm{no}\\ 1 & \mathrm{yes} \end{bmatrix}.$$ Before moving to the logistic model, let us try to use our linearregression model to classify these two outcomes. 
We could for examplefit a linear model to the default case if $y_i > 0.5$ and the nodefault case $y_i \leq 0.5$.We would then have our weighted linear combination, namely $$\begin{equation}\boldsymbol{y} = \boldsymbol{X}^T\boldsymbol{\beta} + \boldsymbol{\epsilon},\label{_auto1} \tag{1}\end{equation}$$ where $\boldsymbol{y}$ is a vector representing the possible outcomes, $\boldsymbol{X}$ is our$n\times p$ design matrix and $\boldsymbol{\beta}$ represents our estimators/predictors.The main problem with our function is that it takes values on theentire real axis. In the case of logistic regression, however, thelabels $y_i$ are discrete variables. A typical example is the creditcard data discussed below here, where we can set the state ofdefaulting the debt to $y_i=1$ and not to $y_i=0$ for one the personsin the data set (see the full example below).One simple way to get a discrete output is to have signfunctions that map the output of a linear regressor to values $\{0,1\}$,$f(s_i)=sign(s_i)=1$ if $s_i\ge 0$ and 0 if otherwise. We will encounter this model in our first demonstration of neural networks. Historically it is called the ``perceptron" model in the machine learningliterature. This model is extremely simple. However, in many cases it is morefavorable to use a ``soft" classifier that outputsthe probability of a given category. This leads us to the logistic function.The following example on data for coronary heart disease (CHD) as function of age may serve as an illustration. In the code here we read and plot whether a person has had CHD (output = 1) or not (output = 0). This ouput is plotted the person's against age. Clearly, the figure shows that attempting to make a standard linear regression fit may not be very meaningful.
###Code
%matplotlib inline
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from sklearn.metrics import mean_squared_error
from IPython.display import display
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
infile = open(data_path("chddata.csv"),'r')
# Read the chd data as csv file and organize the data into arrays with age group, age, and chd
chd = pd.read_csv(infile, names=('ID', 'Age', 'Agegroup', 'CHD'))
chd.columns = ['ID', 'Age', 'Agegroup', 'CHD']
output = chd['CHD']
age = chd['Age']
agegroup = chd['Agegroup']
numberID = chd['ID']
display(chd)
plt.scatter(age, output, marker='o')
plt.axis([18,70.0,-0.1, 1.2])
plt.xlabel(r'Age')
plt.ylabel(r'CHD')
plt.title(r'Age distribution and Coronary heart disease')
plt.show()
###Output
_____no_output_____
###Markdown
What we could attempt, however, is to plot the mean value for each group.
###Code
agegroupmean = np.array([0.1, 0.133, 0.250, 0.333, 0.462, 0.625, 0.765, 0.800])
group = np.array([1, 2, 3, 4, 5, 6, 7, 8])
plt.plot(group, agegroupmean, "r-")
plt.axis([0,9,0, 1.0])
plt.xlabel(r'Age group')
plt.ylabel(r'CHD mean values')
plt.title(r'Mean values for each age group')
plt.show()
###Output
_____no_output_____
###Markdown
We are now trying to find a function $f(y\vert x)$, that is a function which gives us an expected value for the output $y$ with a given input $x$. In standard linear regression with a linear dependence on $x$, we would write this in terms of our model $$f(y_i\vert x_i)=\beta_0+\beta_1 x_i.$$ This expression implies however that $f(y_i\vert x_i)$ could take any value from minus infinity to plus infinity. If we however let $f(y\vert x)$ be represented by the mean value, the above example shows us that we can constrain the function to take values between zero and one, that is we have $0 \le f(y_i\vert x_i) \le 1$. Looking at our last curve we see also that it has an S-shaped form. This leads us to a very popular model for the function $f$, namely the so-called Sigmoid function or logistic model. We will consider this function as representing the probability for finding a value of $y_i$ with a given $x_i$.
The logistic function
Another widely studied model is the so-called perceptron model, which is an example of a "hard classification" model. We will encounter this model when we discuss neural networks as well. Each datapoint is deterministically assigned to a category (i.e. $y_i=0$ or $y_i=1$). In many cases, and the coronary heart disease data forms one of many such examples, it is favorable to have a "soft" classifier that outputs the probability of a given category rather than a single value. For example, given $x_i$, the classifier outputs the probability of being in a category $k$. Logistic regression is the most common example of a so-called soft classifier. In logistic regression, the probability that a data point $x_i$ belongs to a category $y_i=\{0,1\}$ is given by the so-called logit function (or Sigmoid), which is meant to represent the likelihood for a given event, $$p(t) = \frac{1}{1+\exp{(-t)}}=\frac{\exp{(t)}}{1+\exp{(t)}}.$$ Note that $1-p(t)= p(-t)$.
Examples of likelihood functions used in logistic regression and neural networks
The following code plots the logistic function, the step function and other functions we will encounter from here and on.
###Code
"""The sigmoid function (or the logistic curve) is a
function that takes any real number, z, and outputs a number (0,1).
It is useful in neural networks for assigning weights on a relative scale.
The value z is the weighted sum of parameters involved in the learning algorithm."""
import numpy
import matplotlib.pyplot as plt
import math as mt
z = numpy.arange(-5, 5, .1)
sigma_fn = numpy.vectorize(lambda z: 1/(1+numpy.exp(-z)))
sigma = sigma_fn(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, sigma)
ax.set_ylim([-0.1, 1.1])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('sigmoid function')
plt.show()
"""Step Function"""
z = numpy.arange(-5, 5, .02)
step_fn = numpy.vectorize(lambda z: 1.0 if z >= 0.0 else 0.0)
step = step_fn(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, step)
ax.set_ylim([-0.5, 1.5])
ax.set_xlim([-5,5])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('step function')
plt.show()
"""tanh Function"""
z = numpy.arange(-2*mt.pi, 2*mt.pi, 0.1)
t = numpy.tanh(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, t)
ax.set_ylim([-1.0, 1.0])
ax.set_xlim([-2*mt.pi,2*mt.pi])
ax.grid(True)
ax.set_xlabel('z')
ax.set_title('tanh function')
plt.show()
###Output
_____no_output_____
###Markdown
We assume now that we have two classes with $y_i$ either $0$ or $1$. Furthermore we assume also that we have only two parameters $\beta$ in our fitting of the Sigmoid function, that is we define probabilities $$\begin{align*}p(y_i=1|x_i,\boldsymbol{\beta}) &= \frac{\exp{(\beta_0+\beta_1x_i)}}{1+\exp{(\beta_0+\beta_1x_i)}},\nonumber\\p(y_i=0|x_i,\boldsymbol{\beta}) &= 1 - p(y_i=1|x_i,\boldsymbol{\beta}),\end{align*}$$ where $\boldsymbol{\beta}$ are the weights we wish to extract from data, in our case $\beta_0$ and $\beta_1$. Note that we used $$p(y_i=0\vert x_i, \boldsymbol{\beta}) = 1-p(y_i=1\vert x_i, \boldsymbol{\beta}).$$ In order to define the total likelihood for all possible outcomes from a dataset $\mathcal{D}=\{(y_i,x_i)\}$, with the binary labels$y_i\in\{0,1\}$ and where the data points are drawn independently, we use the so-called [Maximum Likelihood Estimation](https://en.wikipedia.org/wiki/Maximum_likelihood_estimation) (MLE) principle. We aim thus at maximizing the probability of seeing the observed data. We can then approximate the likelihood in terms of the product of the individual probabilities of a specific outcome $y_i$, that is $$\begin{align*}P(\mathcal{D}|\boldsymbol{\beta})& = \prod_{i=1}^n \left[p(y_i=1|x_i,\boldsymbol{\beta})\right]^{y_i}\left[1-p(y_i=1|x_i,\boldsymbol{\beta}))\right]^{1-y_i}\nonumber \\\end{align*}$$ from which we obtain the log-likelihood and our **cost/loss** function $$\mathcal{C}(\boldsymbol{\beta}) = \sum_{i=1}^n \left( y_i\log{p(y_i=1|x_i,\boldsymbol{\beta})} + (1-y_i)\log\left[1-p(y_i=1|x_i,\boldsymbol{\beta}))\right]\right).$$ Reordering the logarithms, we can rewrite the **cost/loss** function as $$\mathcal{C}(\boldsymbol{\beta}) = \sum_{i=1}^n \left(y_i(\beta_0+\beta_1x_i) -\log{(1+\exp{(\beta_0+\beta_1x_i)})}\right).$$ The maximum likelihood estimator is defined as the set of parameters that maximize the log-likelihood where we maximize with respect to $\beta$.Since the cost (error) function is just the negative log-likelihood, for logistic regression we have that $$\mathcal{C}(\boldsymbol{\beta})=-\sum_{i=1}^n \left(y_i(\beta_0+\beta_1x_i) -\log{(1+\exp{(\beta_0+\beta_1x_i)})}\right).$$ This equation is known in statistics as the **cross entropy**. Finally, we note that just as in linear regression, in practice we often supplement the cross-entropy with additional regularization terms, usually $L_1$ and $L_2$ regularization as we did for Ridge and Lasso regression.The cross entropy is a convex function of the weights $\boldsymbol{\beta}$ and,therefore, any local minimizer is a global minimizer. Minimizing thiscost function with respect to the two parameters $\beta_0$ and $\beta_1$ we obtain $$\frac{\partial \mathcal{C}(\boldsymbol{\beta})}{\partial \beta_0} = -\sum_{i=1}^n \left(y_i -\frac{\exp{(\beta_0+\beta_1x_i)}}{1+\exp{(\beta_0+\beta_1x_i)}}\right),$$ and $$\frac{\partial \mathcal{C}(\boldsymbol{\beta})}{\partial \beta_1} = -\sum_{i=1}^n \left(y_ix_i -x_i\frac{\exp{(\beta_0+\beta_1x_i)}}{1+\exp{(\beta_0+\beta_1x_i)}}\right).$$ Let us now define a vector $\boldsymbol{y}$ with $n$ elements $y_i$, an$n\times p$ matrix $\boldsymbol{X}$ which contains the $x_i$ values and avector $\boldsymbol{p}$ of fitted probabilities $p(y_i\vert x_i,\boldsymbol{\beta})$. 
We can rewrite in a more compact form the firstderivative of cost function as $$\frac{\partial \mathcal{C}(\boldsymbol{\beta})}{\partial \boldsymbol{\beta}} = -\boldsymbol{X}^T\left(\boldsymbol{y}-\boldsymbol{p}\right).$$ If we in addition define a diagonal matrix $\boldsymbol{W}$ with elements $p(y_i\vert x_i,\boldsymbol{\beta})(1-p(y_i\vert x_i,\boldsymbol{\beta})$, we can obtain a compact expression of the second derivative as $$\frac{\partial^2 \mathcal{C}(\boldsymbol{\beta})}{\partial \boldsymbol{\beta}\partial \boldsymbol{\beta}^T} = \boldsymbol{X}^T\boldsymbol{W}\boldsymbol{X}.$$ Within a binary classification problem, we can easily expand our model to include multiple predictors. Our ratio between likelihoods is then with $p$ predictors $$\log{ \frac{p(\boldsymbol{\beta}\boldsymbol{x})}{1-p(\boldsymbol{\beta}\boldsymbol{x})}} = \beta_0+\beta_1x_1+\beta_2x_2+\dots+\beta_px_p.$$ Here we defined $\boldsymbol{x}=[1,x_1,x_2,\dots,x_p]$ and $\boldsymbol{\beta}=[\beta_0, \beta_1, \dots, \beta_p]$ leading to $$p(\boldsymbol{\beta}\boldsymbol{x})=\frac{ \exp{(\beta_0+\beta_1x_1+\beta_2x_2+\dots+\beta_px_p)}}{1+\exp{(\beta_0+\beta_1x_1+\beta_2x_2+\dots+\beta_px_p)}}.$$ Till now we have mainly focused on two classes, the so-called binarysystem. Suppose we wish to extend to $K$ classes. Let us for the sakeof simplicity assume we have only two predictors. We have then following model $$\log{\frac{p(C=1\vert x)}{p(K\vert x)}} = \beta_{10}+\beta_{11}x_1,$$ and $$\log{\frac{p(C=2\vert x)}{p(K\vert x)}} = \beta_{20}+\beta_{21}x_1,$$ and so on till the class $C=K-1$ class $$\log{\frac{p(C=K-1\vert x)}{p(K\vert x)}} = \beta_{(K-1)0}+\beta_{(K-1)1}x_1,$$ and the model is specified in term of $K-1$ so-called log-odds or**logit** transformations.In our discussion of neural networks we will encounter the above againin terms of a slightly modified function, the so-called **Softmax** function.The softmax function is used in various multiclass classificationmethods, such as multinomial logistic regression (also known assoftmax regression), multiclass linear discriminant analysis, naiveBayes classifiers, and artificial neural networks. Specifically, inmultinomial logistic regression and linear discriminant analysis, theinput to the function is the result of $K$ distinct linear functions,and the predicted probability for the $k$-th class given a samplevector $\boldsymbol{x}$ and a weighting vector $\boldsymbol{\beta}$ is (with twopredictors): $$p(C=k\vert \mathbf {x} )=\frac{\exp{(\beta_{k0}+\beta_{k1}x_1)}}{1+\sum_{l=1}^{K-1}\exp{(\beta_{l0}+\beta_{l1}x_1)}}.$$ It is easy to extend to more predictors. The final class is $$p(C=K\vert \mathbf {x} )=\frac{1}{1+\sum_{l=1}^{K-1}\exp{(\beta_{l0}+\beta_{l1}x_1)}},$$ and they sum to one. Our earlier discussions were all specialized tothe case with two classes only. It is easy to see from the above thatwhat we derived earlier is compatible with these equations.To find the optimal parameters we would typically use a gradientdescent method. Newton's method and gradient descent methods arediscussed in the material on [optimizationmethods](https://compphysics.github.io/MachineLearning/doc/pub/Splines/html/Splines-bs.html). Wisconsin Cancer DataWe show here how we can use a simple regression case on the breastcancer data using Logistic regression as our algorithm forclassification.
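Before turning to that data set, the following is a minimal sketch (my own illustration on hypothetical synthetic data, not part of the lecture material) which evaluates the gradient $-\boldsymbol{X}^T(\boldsymbol{y}-\boldsymbol{p})$ and the Hessian $\boldsymbol{X}^T\boldsymbol{W}\boldsymbol{X}$ for the two-parameter model and performs a few Newton-Raphson updates.
###Code
# Minimal sketch: Newton-Raphson for two-parameter logistic regression
# on synthetic data (all names and numbers here are illustrative).
import numpy as np
np.random.seed(2021)
n = 200
x = np.random.randn(n)
X = np.c_[np.ones(n), x]                  # design matrix with intercept column
true_beta = np.array([-0.5, 1.5])         # "true" parameters used to generate toy labels
y = (np.random.rand(n) < 1.0/(1.0 + np.exp(-X @ true_beta))).astype(float)
beta = np.zeros(2)
for iteration in range(10):
    p = 1.0/(1.0 + np.exp(-X @ beta))     # fitted probabilities
    gradient = -X.T @ (y - p)             # first derivative of the cost
    W = np.diag(p*(1.0 - p))              # diagonal weight matrix
    hessian = X.T @ W @ X                 # second derivative of the cost
    beta = beta - np.linalg.solve(hessian, gradient)   # Newton-Raphson update
print(beta)
###Output
_____no_output_____
###Markdown
We now return to the breast cancer data and the **Scikit-Learn** implementation.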
###Code
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
# Logistic Regression
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
print("Test set accuracy with Logistic Regression: {:.2f}".format(logreg.score(X_test,y_test)))
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Logistic Regression
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy Logistic Regression with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
###Output
_____no_output_____
###Markdown
In addition to the above scores, we could also study the covariance (and the correlation matrix). We use **Pandas** to compute the correlation matrix.
###Code
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
import pandas as pd
# Making a data frame
cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names)
fig, axes = plt.subplots(15,2,figsize=(10,20))
malignant = cancer.data[cancer.target == 0]
benign = cancer.data[cancer.target == 1]
ax = axes.ravel()
for i in range(30):
_, bins = np.histogram(cancer.data[:,i], bins =50)
ax[i].hist(malignant[:,i], bins = bins, alpha = 0.5)
ax[i].hist(benign[:,i], bins = bins, alpha = 0.5)
ax[i].set_title(cancer.feature_names[i])
ax[i].set_yticks(())
ax[0].set_xlabel("Feature magnitude")
ax[0].set_ylabel("Frequency")
ax[0].legend(["Malignant", "Benign"], loc ="best")
fig.tight_layout()
plt.show()
import seaborn as sns
correlation_matrix = cancerpd.corr().round(1)
# use the heatmap function from seaborn to plot the correlation matrix
# annot = True to print the values inside the square
plt.figure(figsize=(15,8))
sns.heatmap(data=correlation_matrix, annot=True)
plt.show()
###Output
_____no_output_____
###Markdown
In the above example we note two things. In the first plot we display the overlap of benign and malignant tumors as functions of the various features in the Wisconsin breast cancer data set. We see that for some of the features we can distinguish clearly the benign and malignant cases, while for other features we cannot. This can point us to which features may be of greater interest when we wish to classify a tumour as benign or malignant. In the second figure we have computed the so-called correlation matrix, which in our case with thirty features becomes a $30\times 30$ matrix. We constructed this matrix using **pandas** via the statements
###Code
cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names)
###Output
_____no_output_____
###Markdown
and then
###Code
correlation_matrix = cancerpd.corr().round(1)
###Output
_____no_output_____
###Markdown
Diagonalizing this matrix we can in turn say something about which features are of relevance and which are not. This leads us to the classical Principal Component Analysis (PCA) theorem with applications. This will be discussed later this semester ([week 43](https://compphysics.github.io/MachineLearning/doc/pub/week43/html/week43-bs.html)).
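As a small illustration (my own addition, assuming the data frame cancerpd defined earlier is still in memory), we can diagonalize the correlation matrix with **numpy** and inspect how much of the total variance the leading eigenvalues carry, which is precisely the idea behind PCA.
###Code
# Diagonalize the 30 x 30 correlation matrix and look at the dominant eigenvalues
import numpy as np
corr = cancerpd.corr().values                 # correlation matrix as a numpy array
eigvals = np.linalg.eigvalsh(corr)[::-1]      # eigenvalues sorted in descending order
print(eigvals[:5])                            # the few largest eigenvalues
print(np.cumsum(eigvals)/np.sum(eigvals))     # cumulative fraction of the total variance
###Output
_____no_output_____
###Markdown
We now repeat the logistic regression analysis and add cross-validation together with some standard performance plots.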
###Code
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
# Load the data
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
# Logistic Regression
logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
print("Test set accuracy with Logistic Regression: {:.2f}".format(logreg.score(X_test,y_test)))
#now scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Logistic Regression
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy Logistic Regression with scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_validate
#Cross validation
accuracy = cross_validate(logreg,X_test_scaled,y_test,cv=10)['test_score']
print(accuracy)
print("Test set accuracy with Logistic Regression and scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
import scikitplot as skplt
y_pred = logreg.predict(X_test_scaled)
skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
plt.show()
y_probas = logreg.predict_proba(X_test_scaled)
skplt.metrics.plot_roc(y_test, y_probas)
plt.show()
skplt.metrics.plot_cumulative_gain(y_test, y_probas)
plt.show()
###Output
_____no_output_____
###Markdown
Optimization, the central part of any Machine Learning algortithmAlmost every problem in machine learning and data science starts witha dataset $X$, a model $g(\beta)$, which is a function of theparameters $\beta$ and a cost function $C(X, g(\beta))$ that allowsus to judge how well the model $g(\beta)$ explains the observations$X$. The model is fit by finding the values of $\beta$ that minimizethe cost function. Ideally we would be able to solve for $\beta$analytically, however this is not possible in general and we must usesome approximative/numerical method to compute the minimum. Revisiting our Logistic Regression caseIn our discussion on Logistic Regression we studied the case oftwo classes, with $y_i$ either$0$ or $1$. Furthermore we assumed also that we have only twoparameters $\beta$ in our fitting, that is wedefined probabilities $$\begin{align*}p(y_i=1|x_i,\boldsymbol{\beta}) &= \frac{\exp{(\beta_0+\beta_1x_i)}}{1+\exp{(\beta_0+\beta_1x_i)}},\nonumber\\p(y_i=0|x_i,\boldsymbol{\beta}) &= 1 - p(y_i=1|x_i,\boldsymbol{\beta}),\end{align*}$$ where $\boldsymbol{\beta}$ are the weights we wish to extract from data, in our case $\beta_0$ and $\beta_1$. The equations to solveOur compact equations used a definition of a vector $\boldsymbol{y}$ with $n$elements $y_i$, an $n\times p$ matrix $\boldsymbol{X}$ which contains the$x_i$ values and a vector $\boldsymbol{p}$ of fitted probabilities$p(y_i\vert x_i,\boldsymbol{\beta})$. We rewrote in a more compact formthe first derivative of the cost function as $$\frac{\partial \mathcal{C}(\boldsymbol{\beta})}{\partial \boldsymbol{\beta}} = -\boldsymbol{X}^T\left(\boldsymbol{y}-\boldsymbol{p}\right).$$ If we in addition define a diagonal matrix $\boldsymbol{W}$ with elements $p(y_i\vert x_i,\boldsymbol{\beta})(1-p(y_i\vert x_i,\boldsymbol{\beta})$, we can obtain a compact expression of the second derivative as $$\frac{\partial^2 \mathcal{C}(\boldsymbol{\beta})}{\partial \boldsymbol{\beta}\partial \boldsymbol{\beta}^T} = \boldsymbol{X}^T\boldsymbol{W}\boldsymbol{X}.$$ This defines what is called the Hessian matrix. Solving using Newton-Raphson's methodIf we can set up these equations, Newton-Raphson's iterative method is normally the method of choice. It requires however that we can compute in an efficient way the matrices that define the first and second derivatives. Our iterative scheme is then given by $$\boldsymbol{\beta}^{\mathrm{new}} = \boldsymbol{\beta}^{\mathrm{old}}-\left(\frac{\partial^2 \mathcal{C}(\boldsymbol{\beta})}{\partial \boldsymbol{\beta}\partial \boldsymbol{\beta}^T}\right)^{-1}_{\boldsymbol{\beta}^{\mathrm{old}}}\times \left(\frac{\partial \mathcal{C}(\boldsymbol{\beta})}{\partial \boldsymbol{\beta}}\right)_{\boldsymbol{\beta}^{\mathrm{old}}},$$ or in matrix form as $$\boldsymbol{\beta}^{\mathrm{new}} = \boldsymbol{\beta}^{\mathrm{old}}-\left(\boldsymbol{X}^T\boldsymbol{W}\boldsymbol{X} \right)^{-1}\times \left(-\boldsymbol{X}^T(\boldsymbol{y}-\boldsymbol{p}) \right)_{\boldsymbol{\beta}^{\mathrm{old}}}.$$ The right-hand side is computed with the old values of $\beta$. If we can compute these matrices, in particular the Hessian, the above is often the easiest method to implement. Brief reminder on Newton-Raphson's methodLet us quickly remind ourselves how we derive the above method.Perhaps the most celebrated of all one-dimensional root-findingroutines is Newton's method, also called the Newton-Raphsonmethod. This method requires the evaluation of both thefunction $f$ and its derivative $f'$ at arbitrary points. 
If you can only calculate the derivativenumerically and/or your function is not of the smooth type, wenormally discourage the use of this method. The equationsThe Newton-Raphson formula consists geometrically of extending thetangent line at a current point until it crosses zero, then settingthe next guess to the abscissa of that zero-crossing. The mathematicsbehind this method is rather simple. Employing a Taylor expansion for$x$ sufficiently close to the solution $s$, we have $$f(s)=0=f(x)+(s-x)f'(x)+\frac{(s-x)^2}{2}f''(x) +\dots. \label{eq:taylornr} \tag{2}$$ For small enough values of the function and for well-behavedfunctions, the terms beyond linear are unimportant, hence we obtain $$f(x)+(s-x)f'(x)\approx 0,$$ yielding $$s\approx x-\frac{f(x)}{f'(x)}.$$ Having in mind an iterative procedure, it is natural to start iterating with $$x_{n+1}=x_n-\frac{f(x_n)}{f'(x_n)}.$$ Simple geometric interpretationThe above is Newton-Raphson's method. It has a simple geometricinterpretation, namely $x_{n+1}$ is the point where the tangent from$(x_n,f(x_n))$ crosses the $x$-axis. Close to the solution,Newton-Raphson converges fast to the desired result. However, if weare far from a root, where the higher-order terms in the series areimportant, the Newton-Raphson formula can give grossly inaccurateresults. For instance, the initial guess for the root might be so farfrom the true root as to let the search interval include a localmaximum or minimum of the function. If an iteration places a trialguess near such a local extremum, so that the first derivative nearlyvanishes, then Newton-Raphson may fail totally Extending to more than one variableNewton's method can be generalized to systems of several non-linear equationsand variables. Consider the case with two equations $$\begin{array}{cc} f_1(x_1,x_2) &=0\\ f_2(x_1,x_2) &=0,\end{array}$$ which we Taylor expand to obtain $$\begin{array}{cc} 0=f_1(x_1+h_1,x_2+h_2)=&f_1(x_1,x_2)+h_1 \partial f_1/\partial x_1+h_2 \partial f_1/\partial x_2+\dots\\ 0=f_2(x_1+h_1,x_2+h_2)=&f_2(x_1,x_2)+h_1 \partial f_2/\partial x_1+h_2 \partial f_2/\partial x_2+\dots \end{array}.$$ Defining the Jacobian matrix $\boldsymbol{J}$ we have $$\boldsymbol{J}=\left( \begin{array}{cc} \partial f_1/\partial x_1 & \partial f_1/\partial x_2 \\ \partial f_2/\partial x_1 &\partial f_2/\partial x_2 \end{array} \right),$$ we can rephrase Newton's method as $$\left(\begin{array}{c} x_1^{n+1} \\ x_2^{n+1} \end{array} \right)=\left(\begin{array}{c} x_1^{n} \\ x_2^{n} \end{array} \right)+\left(\begin{array}{c} h_1^{n} \\ h_2^{n} \end{array} \right),$$ where we have defined $$\left(\begin{array}{c} h_1^{n} \\ h_2^{n} \end{array} \right)= -\boldsymbol{J}^{-1} \left(\begin{array}{c} f_1(x_1^{n},x_2^{n}) \\ f_2(x_1^{n},x_2^{n}) \end{array} \right).$$ We need thus to compute the inverse of the Jacobian matrix and itis to understand that difficulties mayarise in case $\boldsymbol{J}$ is nearly singular.It is rather straightforward to extend the above scheme to systems ofmore than two non-linear equations. In our case, the Jacobian matrix is given by the Hessian that represents the second derivative of cost function. 
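As a small illustration of the one-dimensional scheme above (my own sketch, not part of the lecture code), the following function iterates $x_{n+1}=x_n-f(x_n)/f'(x_n)$ for the simple case $f(x)=x^2-2$, whose positive root is $\sqrt{2}$.
###Code
# One-dimensional Newton-Raphson iteration for f(x) = x^2 - 2
def newton_raphson(f, df, x0, tol=1.0e-12, maxiter=50):
    x = x0
    for _ in range(maxiter):
        step = f(x)/df(x)          # Newton-Raphson step f(x)/f'(x)
        x -= step
        if abs(step) < tol:        # stop when the step is negligible
            break
    return x
root = newton_raphson(lambda x: x**2 - 2, lambda x: 2*x, x0=1.0)
print(root)   # approximately 1.41421356...
###Output
_____no_output_____
###Markdown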
Steepest descentThe basic idea of gradient descent isthat a function $F(\mathbf{x})$, $\mathbf{x} \equiv (x_1,\cdots,x_n)$, decreases fastest if one goes from $\bf {x}$ in thedirection of the negative gradient $-\nabla F(\mathbf{x})$.It can be shown that if $$\mathbf{x}_{k+1} = \mathbf{x}_k - \gamma_k \nabla F(\mathbf{x}_k),$$ with $\gamma_k > 0$.For $\gamma_k$ small enough, then $F(\mathbf{x}_{k+1}) \leqF(\mathbf{x}_k)$. This means that for a sufficiently small $\gamma_k$we are always moving towards smaller function values, i.e a minimum. More on Steepest descentThe previous observation is the basis of the method of steepestdescent, which is also referred to as just gradient descent (GD). Onestarts with an initial guess $\mathbf{x}_0$ for a minimum of $F$ andcomputes new approximations according to $$\mathbf{x}_{k+1} = \mathbf{x}_k - \gamma_k \nabla F(\mathbf{x}_k), \ \ k \geq 0.$$ The parameter $\gamma_k$ is often referred to as the step length orthe learning rate within the context of Machine Learning. The idealIdeally the sequence $\{\mathbf{x}_k \}_{k=0}$ converges to a globalminimum of the function $F$. In general we do not know if we are in aglobal or local minimum. In the special case when $F$ is a convexfunction, all local minima are also global minima, so in this casegradient descent can converge to the global solution. The advantage ofthis scheme is that it is conceptually simple and straightforward toimplement. However the method in this form has some severelimitations:In machine learing we are often faced with non-convex high dimensionalcost functions with many local minima. Since GD is deterministic wewill get stuck in a local minimum, if the method converges, unless wehave a very good intial guess. This also implies that the scheme issensitive to the chosen initial condition.Note that the gradient is a function of $\mathbf{x} =(x_1,\cdots,x_n)$ which makes it expensive to compute numerically. The sensitiveness of the gradient descentThe gradient descent method is sensitive to the choice of learning rate $\gamma_k$. This is dueto the fact that we are only guaranteed that $F(\mathbf{x}_{k+1}) \leqF(\mathbf{x}_k)$ for sufficiently small $\gamma_k$. The problem is todetermine an optimal learning rate. If the learning rate is chosen toosmall the method will take a long time to converge and if it is toolarge we can experience erratic behavior.Many of these shortcomings can be alleviated by introducingrandomness. One such method is that of Stochastic Gradient Descent(SGD), see below. Convex functionsIdeally we want our cost/loss function to be convex(concave).First we give the definition of a convex set: A set $C$ in$\mathbb{R}^n$ is said to be convex if, for all $x$ and $y$ in $C$ andall $t \in (0,1)$ , the point $(1 − t)x + ty$ also belongs toC. Geometrically this means that every point on the line segmentconnecting $x$ and $y$ is in $C$ as discussed below.The convex subsets of $\mathbb{R}$ are the intervals of$\mathbb{R}$. Examples of convex sets of $\mathbb{R}^2$ are theregular polygons (triangles, rectangles, pentagons, etc...). Convex function**Convex function**: Let $X \subset \mathbb{R}^n$ be a convex set. Assume that the function $f: X \rightarrow \mathbb{R}$ is continuous, then $f$ is said to be convex if $$f(tx_1 + (1-t)x_2) \leq tf(x_1) + (1-t)f(x_2) $$ for all $x_1, x_2 \in X$ and for all $t \in [0,1]$. If $\leq$ is replaced with a strict inequaltiy in the definition, we demand $x_1 \neq x_2$ and $t\in(0,1)$ then $f$ is said to be strictly convex. 
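A quick numerical sanity check of this definition (my own addition) is to sample random points and verify the inequality directly for the convex function $f(x)=x^2$.
###Code
# Check f(t*x1 + (1-t)*x2) <= t*f(x1) + (1-t)*f(x2) for f(x) = x^2 at random points
import numpy as np
np.random.seed(0)
f = lambda x: x**2
x1, x2 = np.random.randn(1000), np.random.randn(1000)
t = np.random.rand(1000)
lhs = f(t*x1 + (1 - t)*x2)
rhs = t*f(x1) + (1 - t)*f(x2)
print(np.all(lhs <= rhs + 1.0e-12))   # True: the inequality holds at every sampled point
###Output
_____no_output_____
###Markdown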
For a single variable function, convexity means that if you draw a straight line connecting $f(x_1)$ and $f(x_2)$, the value of the function on the interval $[x_1,x_2]$ is always below the line as illustrated below. Conditions on convex functionsIn the following we state first and second-order conditions whichensures convexity of a function $f$. We write $D_f$ to denote thedomain of $f$, i.e the subset of $R^n$ where $f$ is defined. For moredetails and proofs we refer to: [S. Boyd and L. Vandenberghe. Convex Optimization. Cambridge University Press](http://stanford.edu/boyd/cvxbook/, 2004).**First order condition.**Suppose $f$ is differentiable (i.e $\nabla f(x)$ is well defined forall $x$ in the domain of $f$). Then $f$ is convex if and only if $D_f$is a convex set and $$f(y) \geq f(x) + \nabla f(x)^T (y-x) $$ holdsfor all $x,y \in D_f$. This condition means that for a convex functionthe first order Taylor expansion (right hand side above) at any pointa global under estimator of the function. To convince yourself you canmake a drawing of $f(x) = x^2+1$ and draw the tangent line to $f(x)$ andnote that it is always below the graph.**Second order condition.**Assume that $f$ is twicedifferentiable, i.e the Hessian matrix exists at each point in$D_f$. Then $f$ is convex if and only if $D_f$ is a convex set and itsHessian is positive semi-definite for all $x\in D_f$. For asingle-variable function this reduces to $f''(x) \geq 0$. Geometrically this means that $f$ has nonnegative curvatureeverywhere.This condition is particularly useful since it gives us an procedure for determining if the function under consideration is convex, apart from using the definition. More on convex functionsThe next result is of great importance to us and the reason why we aregoing on about convex functions. In machine learning we frequentlyhave to minimize a loss/cost function in order to find the bestparameters for the model we are considering. Ideally we want theglobal minimum (for high-dimensional models it is hard to knowif we have local or global minimum). However, if the cost/loss functionis convex the following result provides invaluable information:**Any minimum is global for convex functions.**Consider the problem of finding $x \in \mathbb{R}^n$ such that $f(x)$is minimal, where $f$ is convex and differentiable. Then, any point$x^*$ that satisfies $\nabla f(x^*) = 0$ is a global minimum.This result means that if we know that the cost/loss function is convex and we are able to find a minimum, we are guaranteed that it is a global minimum. Some simple problems1. Show that $f(x)=x^2$ is convex for $x \in \mathbb{R}$ using the definition of convexity. Hint: If you re-write the definition, $f$ is convex if the following holds for all $x,y \in D_f$ and any $\lambda \in [0,1]$ $\lambda f(x)+(1-\lambda)f(y)-f(\lambda x + (1-\lambda) y ) \geq 0$.2. Using the second order condition show that the following functions are convex on the specified domain. * $f(x) = e^x$ is convex for $x \in \mathbb{R}$. * $g(x) = -\ln(x)$ is convex for $x \in (0,\infty)$.3. Let $f(x) = x^2$ and $g(x) = e^x$. Show that $f(g(x))$ and $g(f(x))$ is convex for $x \in \mathbb{R}$. Also show that if $f(x)$ is any convex function than $h(x) = e^{f(x)}$ is convex.4. A norm is any function that satisfy the following properties * $f(\alpha x) = |\alpha| f(x)$ for all $\alpha \in \mathbb{R}$. 
* $f(x+y) \leq f(x) + f(y)$ * $f(x) \leq 0$ for all $x \in \mathbb{R}^n$ with equality if and only if $x = 0$Using the definition of convexity, try to show that a function satisfying the properties above is convex (the third condition is not needed to show this). Friday September 25[Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/forelesningsvideoer/LectureSeptember25.mp4?vrtx=view-as-webpage) and [link to handwritten notes](https://github.com/CompPhysics/MachineLearning/blob/master/doc/HandWrittenNotes/NotesSeptember25.pdf). Standard steepest descentBefore we proceed, we would like to discuss the approach called the**standard Steepest descent** (different from the above steepest descent discussion), which again leads to us having to be ableto compute a matrix. It belongs to the class of Conjugate Gradient methods (CG).[The success of the CG method](https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf)for finding solutions of non-linear problems is based on the theoryof conjugate gradients for linear systems of equations. It belongs tothe class of iterative methods for solving problems from linearalgebra of the type $$\boldsymbol{A}\boldsymbol{x} = \boldsymbol{b}.$$ In the iterative process we end up with a problem like $$\boldsymbol{r}= \boldsymbol{b}-\boldsymbol{A}\boldsymbol{x},$$ where $\boldsymbol{r}$ is the so-called residual or error in the iterative process.When we have found the exact solution, $\boldsymbol{r}=0$. Gradient methodThe residual is zero when we reach the minimum of the quadratic equation $$P(\boldsymbol{x})=\frac{1}{2}\boldsymbol{x}^T\boldsymbol{A}\boldsymbol{x} - \boldsymbol{x}^T\boldsymbol{b},$$ with the constraint that the matrix $\boldsymbol{A}$ is positive definite andsymmetric. This defines also the Hessian and we want it to be positive definite. Steepest descent methodWe denote the initial guess for $\boldsymbol{x}$ as $\boldsymbol{x}_0$. We can assume without loss of generality that $$\boldsymbol{x}_0=0,$$ or consider the system $$\boldsymbol{A}\boldsymbol{z} = \boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_0,$$ instead. Steepest descent methodOne can show that the solution $\boldsymbol{x}$ is also the unique minimizer of the quadratic form $$f(\boldsymbol{x}) = \frac{1}{2}\boldsymbol{x}^T\boldsymbol{A}\boldsymbol{x} - \boldsymbol{x}^T \boldsymbol{x} , \quad \boldsymbol{x}\in\mathbf{R}^n.$$ This suggests taking the first basis vector $\boldsymbol{r}_1$ (see below for definition) to be the gradient of $f$ at $\boldsymbol{x}=\boldsymbol{x}_0$, which equals $$\boldsymbol{A}\boldsymbol{x}_0-\boldsymbol{b},$$ and $\boldsymbol{x}_0=0$ it is equal $-\boldsymbol{b}$. Final expressionsWe can compute the residual iteratively as $$\boldsymbol{r}_{k+1}=\boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_{k+1},$$ which equals $$\boldsymbol{b}-\boldsymbol{A}(\boldsymbol{x}_k+\alpha_k\boldsymbol{r}_k),$$ or $$(\boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_k)-\alpha_k\boldsymbol{A}\boldsymbol{r}_k,$$ which gives $$\alpha_k = \frac{\boldsymbol{r}_k^T\boldsymbol{r}_k}{\boldsymbol{r}_k^T\boldsymbol{A}\boldsymbol{r}_k}$$ leading to the iterative scheme $$\boldsymbol{x}_{k+1}=\boldsymbol{x}_k-\alpha_k\boldsymbol{r}_{k},$$ Steepest descent example
###Code
import numpy as np
import numpy.linalg as la
import scipy.optimize as sopt
import matplotlib.pyplot as pt
from mpl_toolkits.mplot3d import axes3d
def f(x):
return 0.5*x[0]**2 + 2.5*x[1]**2
def df(x):
return np.array([x[0], 5*x[1]])
fig = pt.figure()
ax = fig.add_subplot(projection="3d")  # fig.gca(projection=...) is no longer supported in recent Matplotlib
xmesh, ymesh = np.mgrid[-2:2:50j,-2:2:50j]
fmesh = f(np.array([xmesh, ymesh]))
ax.plot_surface(xmesh, ymesh, fmesh)
###Output
_____no_output_____
###Markdown
And then as a contour plot
###Code
pt.axis("equal")
pt.contour(xmesh, ymesh, fmesh)
guesses = [np.array([2, 2./5])]
###Output
_____no_output_____
###Markdown
Find guesses
###Code
x = guesses[-1]
s = -df(x)
###Output
_____no_output_____
###Markdown
Run it!
###Code
def f1d(alpha):
return f(x + alpha*s)
alpha_opt = sopt.golden(f1d)
next_guess = x + alpha_opt * s
guesses.append(next_guess)
print(next_guess)
###Output
_____no_output_____
###Markdown
What happened?
###Code
pt.axis("equal")
pt.contour(xmesh, ymesh, fmesh, 50)
it_array = np.array(guesses)
pt.plot(it_array.T[0], it_array.T[1], "x-")
###Output
_____no_output_____
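###Markdown
The cell above performs a single exact line-search step. A short extension (my own addition, reusing f, df, sopt and guesses from the cells above) repeats the same steepest-descent step in a loop and shows the characteristic zig-zag path towards the minimum at the origin.
###Code
# Repeat the steepest-descent step with exact (golden-section) line search
for k in range(20):
    x = guesses[-1]
    s = -df(x)                              # steepest-descent direction
    f1d = lambda alpha: f(x + alpha*s)      # the cost along the search direction
    alpha_opt = sopt.golden(f1d)            # exact line search
    guesses.append(x + alpha_opt*s)
print(guesses[-1])                          # close to [0, 0]
###Output
_____no_output_____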
###Markdown
Conjugate gradient methodIn the CG method we define so-called conjugate directions and two vectors $\boldsymbol{s}$ and $\boldsymbol{t}$are said to beconjugate if $$\boldsymbol{s}^T\boldsymbol{A}\boldsymbol{t}= 0.$$ The philosophy of the CG method is to perform searches in various conjugate directionsof our vectors $\boldsymbol{x}_i$ obeying the above criterion, namely $$\boldsymbol{x}_i^T\boldsymbol{A}\boldsymbol{x}_j= 0.$$ Two vectors are conjugate if they are orthogonal with respect to this inner product. Being conjugate is a symmetric relation: if $\boldsymbol{s}$ is conjugate to $\boldsymbol{t}$, then $\boldsymbol{t}$ is conjugate to $\boldsymbol{s}$. Conjugate gradient methodAn example is given by the eigenvectors of the matrix $$\boldsymbol{v}_i^T\boldsymbol{A}\boldsymbol{v}_j= \lambda\boldsymbol{v}_i^T\boldsymbol{v}_j,$$ which is zero unless $i=j$. Conjugate gradient methodAssume now that we have a symmetric positive-definite matrix $\boldsymbol{A}$ of size$n\times n$. At each iteration $i+1$ we obtain the conjugate direction of a vector $$\boldsymbol{x}_{i+1}=\boldsymbol{x}_{i}+\alpha_i\boldsymbol{p}_{i}.$$ We assume that $\boldsymbol{p}_{i}$ is a sequence of $n$ mutually conjugate directions. Then the $\boldsymbol{p}_{i}$ form a basis of $R^n$ and we can expand the solution $ \boldsymbol{A}\boldsymbol{x} = \boldsymbol{b}$ in this basis, namely $$\boldsymbol{x} = \sum^{n}_{i=1} \alpha_i \boldsymbol{p}_i.$$ Conjugate gradient methodThe coefficients are given by $$\mathbf{A}\mathbf{x} = \sum^{n}_{i=1} \alpha_i \mathbf{A} \mathbf{p}_i = \mathbf{b}.$$ Multiplying with $\boldsymbol{p}_k^T$ from the left gives $$\boldsymbol{p}_k^T \boldsymbol{A}\boldsymbol{x} = \sum^{n}_{i=1} \alpha_i\boldsymbol{p}_k^T \boldsymbol{A}\boldsymbol{p}_i= \boldsymbol{p}_k^T \boldsymbol{b},$$ and we can define the coefficients $\alpha_k$ as $$\alpha_k = \frac{\boldsymbol{p}_k^T \boldsymbol{b}}{\boldsymbol{p}_k^T \boldsymbol{A} \boldsymbol{p}_k}$$ Conjugate gradient method and iterationsIf we choose the conjugate vectors $\boldsymbol{p}_k$ carefully, then we may not need all of them to obtain a good approximation to the solution $\boldsymbol{x}$. We want to regard the conjugate gradient method as an iterative method. This will us to solve systems where $n$ is so large that the direct method would take too much time.We denote the initial guess for $\boldsymbol{x}$ as $\boldsymbol{x}_0$. We can assume without loss of generality that $$\boldsymbol{x}_0=0,$$ or consider the system $$\boldsymbol{A}\boldsymbol{z} = \boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_0,$$ instead. Conjugate gradient methodOne can show that the solution $\boldsymbol{x}$ is also the unique minimizer of the quadratic form $$f(\boldsymbol{x}) = \frac{1}{2}\boldsymbol{x}^T\boldsymbol{A}\boldsymbol{x} - \boldsymbol{x}^T \boldsymbol{x} , \quad \boldsymbol{x}\in\mathbf{R}^n.$$ This suggests taking the first basis vector $\boldsymbol{p}_1$ to be the gradient of $f$ at $\boldsymbol{x}=\boldsymbol{x}_0$, which equals $$\boldsymbol{A}\boldsymbol{x}_0-\boldsymbol{b},$$ and $\boldsymbol{x}_0=0$ it is equal $-\boldsymbol{b}$.The other vectors in the basis will be conjugate to the gradient, hence the name conjugate gradient method. 
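Anticipating the direction and residual updates derived just below, a compact sketch (my own illustration, written in the standard textbook form of the updates, which is equivalent to the expressions in this section) of the full conjugate-gradient iteration for a small symmetric positive-definite system reads:
###Code
# Conjugate gradient for a small SPD system A x = b (illustrative sketch)
import numpy as np
np.random.seed(1)
n = 5
M = np.random.randn(n, n)
A = M.T @ M + n*np.eye(n)          # symmetric positive-definite test matrix
b = np.random.randn(n)
x = np.zeros(n)
r = b - A @ x                      # initial residual
p = r.copy()                       # first search direction
for k in range(n):                 # at most n steps in exact arithmetic
    Ap = A @ p
    alpha = (r @ r)/(p @ Ap)       # step length along the conjugate direction
    x = x + alpha*p
    r_new = r - alpha*Ap           # residual update
    beta = (r_new @ r_new)/(r @ r)
    p = r_new + beta*p             # new direction, conjugate to the previous ones
    r = r_new
print(np.allclose(x, np.linalg.solve(A, b)))   # True
###Output
_____no_output_____
###Markdown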
Conjugate gradient methodLet $\boldsymbol{r}_k$ be the residual at the $k$-th step: $$\boldsymbol{r}_k=\boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_k.$$ Note that $\boldsymbol{r}_k$ is the negative gradient of $f$ at $\boldsymbol{x}=\boldsymbol{x}_k$, so the gradient descent method would be to move in the direction $\boldsymbol{r}_k$. Here, we insist that the directions $\boldsymbol{p}_k$ are conjugate to each other, so we take the direction closest to the gradient $\boldsymbol{r}_k$ under the conjugacy constraint. This gives the following expression $$\boldsymbol{p}_{k+1}=\boldsymbol{r}_k-\frac{\boldsymbol{p}_k^T \boldsymbol{A}\boldsymbol{r}_k}{\boldsymbol{p}_k^T\boldsymbol{A}\boldsymbol{p}_k} \boldsymbol{p}_k.$$ Conjugate gradient methodWe can also compute the residual iteratively as $$\boldsymbol{r}_{k+1}=\boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_{k+1},$$ which equals $$\boldsymbol{b}-\boldsymbol{A}(\boldsymbol{x}_k+\alpha_k\boldsymbol{p}_k),$$ or $$(\boldsymbol{b}-\boldsymbol{A}\boldsymbol{x}_k)-\alpha_k\boldsymbol{A}\boldsymbol{p}_k,$$ which gives $$\boldsymbol{r}_{k+1}=\boldsymbol{r}_k-\boldsymbol{A}\boldsymbol{p}_{k},$$ Revisiting our first homeworkWe will use linear regression as a case study for the gradient descentmethods. Linear regression is a great test case for the gradientdescent methods discussed in the lectures since it has severaldesirable properties such as:1. An analytical solution (recall homework set 1).2. The gradient can be computed analytically.3. The cost function is convex which guarantees that gradient descent converges for small enough learning ratesWe revisit an example similar to what we had in the first homework set. We had a function of the type
###Code
import numpy as np
m = 100   # number of data points
x = 2*np.random.rand(m,1)
y = 4+3*x+np.random.randn(m,1)
###Output
_____no_output_____
###Markdown
with $x_i \in [0,1] $ is chosen randomly using a uniform distribution. Additionally we have a stochastic noise chosen according to a normal distribution $\cal {N}(0,1)$. The linear regression model is given by $$h_\beta(x) = \boldsymbol{y} = \beta_0 + \beta_1 x,$$ such that $$\boldsymbol{y}_i = \beta_0 + \beta_1 x_i.$$ Gradient descent exampleLet $\mathbf{y} = (y_1,\cdots,y_n)^T$, $\mathbf{\boldsymbol{y}} = (\boldsymbol{y}_1,\cdots,\boldsymbol{y}_n)^T$ and $\beta = (\beta_0, \beta_1)^T$It is convenient to write $\mathbf{\boldsymbol{y}} = X\beta$ where $X \in \mathbb{R}^{100 \times 2} $ is the design matrix given by (we keep the intercept here) $$X \equiv \begin{bmatrix}1 & x_1 \\\vdots & \vdots \\1 & x_{100} & \\\end{bmatrix}.$$ The cost/loss/risk function is given by ( $$C(\beta) = \frac{1}{n}||X\beta-\mathbf{y}||_{2}^{2} = \frac{1}{n}\sum_{i=1}^{100}\left[ (\beta_0 + \beta_1 x_i)^2 - 2 y_i (\beta_0 + \beta_1 x_i) + y_i^2\right]$$ and we want to find $\beta$ such that $C(\beta)$ is minimized. The derivative of the cost/loss functionComputing $\partial C(\beta) / \partial \beta_0$ and $\partial C(\beta) / \partial \beta_1$ we can show that the gradient can be written as $$\nabla_{\beta} C(\beta) = \frac{2}{n}\begin{bmatrix} \sum_{i=1}^{100} \left(\beta_0+\beta_1x_i-y_i\right) \\\sum_{i=1}^{100}\left( x_i (\beta_0+\beta_1x_i)-y_ix_i\right) \\\end{bmatrix} = \frac{2}{n}X^T(X\beta - \mathbf{y}),$$ where $X$ is the design matrix defined above. The Hessian matrixThe Hessian matrix of $C(\beta)$ is given by $$\boldsymbol{H} \equiv \begin{bmatrix}\frac{\partial^2 C(\beta)}{\partial \beta_0^2} & \frac{\partial^2 C(\beta)}{\partial \beta_0 \partial \beta_1} \\\frac{\partial^2 C(\beta)}{\partial \beta_0 \partial \beta_1} & \frac{\partial^2 C(\beta)}{\partial \beta_1^2} & \\\end{bmatrix} = \frac{2}{n}X^T X.$$ This result implies that $C(\beta)$ is a convex function since the matrix $X^T X$ always is positive semi-definite. Simple programWe can now write a program that minimizes $C(\beta)$ using the gradient descent method with a constant learning rate $\gamma$ according to $$\beta_{k+1} = \beta_k - \gamma \nabla_\beta C(\beta_k), \ k=0,1,\cdots$$ We can use the expression we computed for the gradient and let use a$\beta_0$ be chosen randomly and let $\gamma = 0.001$. Stop iteratingwhen $||\nabla_\beta C(\beta_k) || \leq \epsilon = 10^{-8}$. **Note that the code below does not include the latter stop criterion**.And finally we can compare our solution for $\beta$ with the analytic result given by $\beta= (X^TX)^{-1} X^T \mathbf{y}$. Gradient Descent ExampleHere our simple example
###Code
# Importing various packages
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# the number of datapoints
n = 100
x = 2*np.random.rand(n,1)
y = 4+3*x+np.random.randn(n,1)
X = np.c_[np.ones((n,1)), x]
# Hessian matrix
H = (2.0/n)* X.T @ X
# Get the eigenvalues
EigValues, EigVectors = np.linalg.eig(H)
print(EigValues)
beta_linreg = np.linalg.inv(X.T @ X) @ X.T @ y
print(beta_linreg)
beta = np.random.randn(2,1)
eta = 1.0/np.max(EigValues)
Niterations = 1000
for iter in range(Niterations):
gradient = (2.0/n)*X.T @ (X @ beta-y)
beta -= eta*gradient
print(beta)
xnew = np.array([[0],[2]])
xbnew = np.c_[np.ones((2,1)), xnew]
ypredict = xbnew.dot(beta)
ypredict2 = xbnew.dot(beta_linreg)
plt.plot(xnew, ypredict, "r-")
plt.plot(xnew, ypredict2, "b-")
plt.plot(x, y ,'ro')
plt.axis([0,2.0,0, 15.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Gradient descent example')
plt.show()
###Output
_____no_output_____
###Markdown
And a corresponding example using **scikit-learn**
###Code
# Importing various packages
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
n = 100
x = 2*np.random.rand(n,1)
y = 4+3*x+np.random.randn(n,1)
X = np.c_[np.ones((n,1)), x]
beta_linreg = np.linalg.inv(X.T @ X) @ (X.T @ y)
print(beta_linreg)
sgdreg = SGDRegressor(max_iter = 50, penalty=None, eta0=0.1)
sgdreg.fit(x,y.ravel())
print(sgdreg.intercept_, sgdreg.coef_)
###Output
_____no_output_____
###Markdown
Gradient descent and RidgeWe have also discussed Ridge regression where the loss function contains a regularized term given by the $L_2$ norm of $\beta$, $$C_{\text{ridge}}(\beta) = \frac{1}{n}||X\beta -\mathbf{y}||^2 + \lambda ||\beta||^2, \ \lambda \geq 0.$$ In order to minimize $C_{\text{ridge}}(\beta)$ using GD we only have adjust the gradient as follows $$\nabla_\beta C_{\text{ridge}}(\beta) = \frac{2}{n}\begin{bmatrix} \sum_{i=1}^{100} \left(\beta_0+\beta_1x_i-y_i\right) \\\sum_{i=1}^{100}\left( x_i (\beta_0+\beta_1x_i)-y_ix_i\right) \\\end{bmatrix} + 2\lambda\begin{bmatrix} \beta_0 \\ \beta_1\end{bmatrix} = 2 (X^T(X\beta - \mathbf{y})+\lambda \beta).$$ We can easily extend our program to minimize $C_{\text{ridge}}(\beta)$ using gradient descent and compare with the analytical solution given by $$\beta_{\text{ridge}} = \left(X^T X + \lambda I_{2 \times 2} \right)^{-1} X^T \mathbf{y}.$$ Program example for gradient descent with Ridge Regression
###Code
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# the number of datapoints
n = 100
x = 2*np.random.rand(n,1)
y = 4+3*x+np.random.randn(n,1)
X = np.c_[np.ones((n,1)), x]
XT_X = X.T @ X
#Ridge parameter lambda
lmbda = 0.001
Id = lmbda* np.eye(XT_X.shape[0])
beta_linreg = np.linalg.inv(XT_X+Id) @ X.T @ y
print(beta_linreg)
# Start plain gradient descent
beta = np.random.randn(2,1)
eta = 0.1
Niterations = 100
for iter in range(Niterations):
gradients = 2.0/n*X.T @ (X @ (beta)-y)+2*lmbda*beta
beta -= eta*gradients
print(beta)
ypredict = X @ beta
ypredict2 = X @ beta_linreg
plt.plot(x, ypredict, "r-")
plt.plot(x, ypredict2, "b-")
plt.plot(x, y ,'ro')
plt.axis([0,2.0,0, 15.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Gradient descent example for Ridge')
plt.show()
###Output
_____no_output_____
###Markdown
Using gradient descent methods, limitations* **Gradient descent (GD) finds local minima of our function**. Since the GD algorithm is deterministic, if it converges, it will converge to a local minimum of our cost/loss/risk function. Because in ML we are often dealing with extremely rugged landscapes with many local minima, this can lead to poor performance.* **GD is sensitive to initial conditions**. One consequence of the local nature of GD is that initial conditions matter. Depending on where one starts, one will end up at a different local minima. Therefore, it is very important to think about how one initializes the training process. This is true for GD as well as more complicated variants of GD.* **Gradients are computationally expensive to calculate for large datasets**. In many cases in statistics and ML, the cost/loss/risk function is a sum of terms, with one term for each data point. For example, in linear regression, $E \propto \sum_{i=1}^n (y_i - \mathbf{w}^T\cdot\mathbf{x}_i)^2$; for logistic regression, the square error is replaced by the cross entropy. To calculate the gradient we have to sum over *all* $n$ data points. Doing this at every GD step becomes extremely computationally expensive. An ingenious solution to this, is to calculate the gradients using small subsets of the data called "mini batches". This has the added benefit of introducing stochasticity into our algorithm.* **GD is very sensitive to choices of learning rates**. GD is extremely sensitive to the choice of learning rates. If the learning rate is very small, the training process take an extremely long time. For larger learning rates, GD can diverge and give poor results. Furthermore, depending on what the local landscape looks like, we have to modify the learning rates to ensure convergence. Ideally, we would *adaptively* choose the learning rates to match the landscape.* **GD treats all directions in parameter space uniformly.** Another major drawback of GD is that unlike Newton's method, the learning rate for GD is the same in all directions in parameter space. For this reason, the maximum learning rate is set by the behavior of the steepest direction and this can significantly slow down training. Ideally, we would like to take large steps in flat directions and small steps in steep directions. Since we are exploring rugged landscapes where curvatures change, this requires us to keep track of not only the gradient but second derivatives. The ideal scenario would be to calculate the Hessian but this proves to be too computationally expensive. * GD can take exponential time to escape saddle points, even with random initialization. As we mentioned, GD is extremely sensitive to initial condition since it determines the particular local minimum GD would eventually reach. However, even with a good initialization scheme, through the introduction of randomness, GD can still take exponential time to escape saddle points. 
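To illustrate the learning-rate sensitivity mentioned in the list above, the following small experiment (my own addition) runs plain gradient descent on the linear-regression cost used earlier in this section with a very small, a moderate and a too large step length.
###Code
# Gradient descent on the OLS cost with three different learning rates
import numpy as np
np.random.seed(2021)
n = 100
x = 2*np.random.rand(n,1)
y = 4+3*x+np.random.randn(n,1)
X = np.c_[np.ones((n,1)), x]
H = (2.0/n)*X.T @ X                              # Hessian of the cost
eta_max = 2.0/np.max(np.linalg.eigvalsh(H))      # divergence threshold for this quadratic cost
for eta in [0.001, 0.1, 1.1*eta_max]:
    beta = np.zeros((2,1))
    for iteration in range(1000):
        beta -= eta*(2.0/n)*X.T @ (X @ beta - y)
    print("eta = %.4f, final cost = %.3e" % (eta, np.mean((X @ beta - y)**2)))
###Output
_____no_output_____
###Markdown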
Stochastic Gradient DescentStochastic gradient descent (SGD) and variants thereof address some ofthe shortcomings of the Gradient descent method discussed above.The underlying idea of SGD comes from the observation that the costfunction, which we want to minimize, can almost always be written as asum over $n$ data points $\{\mathbf{x}_i\}_{i=1}^n$, $$C(\mathbf{\beta}) = \sum_{i=1}^n c_i(\mathbf{x}_i,\mathbf{\beta}).$$ Computation of gradientsThis in turn means that the gradient can becomputed as a sum over $i$-gradients $$\nabla_\beta C(\mathbf{\beta}) = \sum_i^n \nabla_\beta c_i(\mathbf{x}_i,\mathbf{\beta}).$$ Stochasticity/randomness is introduced by only taking thegradient on a subset of the data called minibatches. If there are $n$data points and the size of each minibatch is $M$, there will be $n/M$minibatches. We denote these minibatches by $B_k$ where$k=1,\cdots,n/M$. SGD exampleAs an example, suppose we have $10$ data points $(\mathbf{x}_1,\cdots, \mathbf{x}_{10})$ and we choose to have $M=5$ minibathces,then each minibatch contains two data points. In particular we have$B_1 = (\mathbf{x}_1,\mathbf{x}_2), \cdots, B_5 =(\mathbf{x}_9,\mathbf{x}_{10})$. Note that if you choose $M=1$ youhave only a single batch with all data points and on the other extreme,you may choose $M=n$ resulting in a minibatch for each datapoint, i.e$B_k = \mathbf{x}_k$.The idea is now to approximate the gradient by replacing the sum overall data points with a sum over the data points in one the minibatchespicked at random in each gradient descent step $$\nabla_{\beta}C(\mathbf{\beta}) = \sum_{i=1}^n \nabla_\beta c_i(\mathbf{x}_i,\mathbf{\beta}) \rightarrow \sum_{i \in B_k}^n \nabla_\betac_i(\mathbf{x}_i, \mathbf{\beta}).$$ The gradient stepThus a gradient descent step now looks like $$\beta_{j+1} = \beta_j - \gamma_j \sum_{i \in B_k}^n \nabla_\beta c_i(\mathbf{x}_i,\mathbf{\beta})$$ where $k$ is picked at random with equalprobability from $[1,n/M]$. An iteration over the number ofminibathces (n/M) is commonly referred to as an epoch. Thus it istypical to choose a number of epochs and for each epoch iterate overthe number of minibatches, as exemplified in the code below. Simple example code
###Code
import numpy as np
n = 100 #100 datapoints
M = 5 #size of each minibatch
m = int(n/M) #number of minibatches
n_epochs = 10 #number of epochs
j = 0
for epoch in range(1,n_epochs+1):
for i in range(m):
k = np.random.randint(m) #Pick the k-th minibatch at random
#Compute the gradient using the data in minibatch Bk
        #Compute new suggestion for beta
j += 1
###Output
_____no_output_____
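###Markdown
The skeleton above leaves the gradient computation and the parameter update empty. A complete minimal version (my own sketch, reusing the synthetic linear-regression data $y=4+3x+\varepsilon$ from earlier in this section) could look as follows.
###Code
# Minibatch stochastic gradient descent for the linear model y = 4 + 3x + noise
import numpy as np
np.random.seed(2021)
n = 100
x = 2*np.random.rand(n,1)
y = 4+3*x+np.random.randn(n,1)
X = np.c_[np.ones((n,1)), x]
M = 5                      # size of each minibatch
m = int(n/M)               # number of minibatches
n_epochs = 50
eta = 0.1                  # fixed learning rate, for simplicity
beta = np.random.randn(2,1)
for epoch in range(1,n_epochs+1):
    for i in range(m):
        k = np.random.randint(m)                       # pick the k-th minibatch at random
        Xk = X[k*M:(k+1)*M]
        yk = y[k*M:(k+1)*M]
        gradient = (2.0/M)*Xk.T @ (Xk @ beta - yk)     # gradient on minibatch Bk
        beta -= eta*gradient                           # new suggestion for beta
print(beta)   # should be close to [4, 3]
###Output
_____no_output_____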
###Markdown
Taking the gradient only on a subset of the data has two important benefits. First, it introduces randomness which decreases the chance that our optimization scheme gets stuck in a local minimum. Second, if the size of the minibatches is small relative to the number of datapoints ($M < n$), the computation of the gradient is much cheaper since we sum over the datapoints in the $k$-th minibatch and not all $n$ datapoints. When do we stop?A natural question is when do we stop the search for a new minimum? One possibility is to compute the full gradient after a given number of epochs and check if the norm of the gradient is smaller than some threshold and stop if true. However, the condition that the gradient is zero is valid also for local minima, so this would only tell us that we are close to a local/global minimum. However, we could also evaluate the cost function at this point, store the result and continue the search. If the test kicks in at a later stage we can compare the values of the cost function and keep the $\beta$ that gave the lowest value. Slightly different approachAnother approach is to let the step length $\gamma_j$ depend on the number of epochs in such a way that it becomes very small after a reasonable time, such that we do not move at all. As an example, let $e = 0,1,2,3,\cdots$ denote the current epoch and let $t_0, t_1 > 0$ be two fixed numbers. Furthermore, let $t = e \cdot m + i$ where $m$ is the number of minibatches and $i=0,\cdots,m-1$. Then the function $$\gamma_j(t; t_0, t_1) = \frac{t_0}{t+t_1} $$ goes to zero as the number of epochs gets large. I.e. we start with a step length $\gamma_j (0; t_0, t_1) = t_0/t_1$ which decays in *time* $t$. In this way we can fix the number of epochs, compute $\beta$ and evaluate the cost function at the end. Repeating the computation will give a different result since the scheme is random by design. Then we pick the final $\beta$ that gives the lowest value of the cost function.
###Code
import numpy as np
def step_length(t,t0,t1):
return t0/(t+t1)
n = 100 #100 datapoints
M = 5 #size of each minibatch
m = int(n/M) #number of minibatches
n_epochs = 500 #number of epochs
t0 = 1.0
t1 = 10
gamma_j = t0/t1
j = 0
for epoch in range(1,n_epochs+1):
for i in range(m):
k = np.random.randint(m) #Pick the k-th minibatch at random
#Compute the gradient using the data in minibatch Bk
#Compute new suggestion for beta
t = epoch*m+i
gamma_j = step_length(t,t0,t1)
j += 1
print("gamma_j after %d epochs: %g" % (n_epochs,gamma_j))
###Output
_____no_output_____
###Markdown
Program for stochastic gradient
###Code
# Importing various packages
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
m = 100
x = 2*np.random.rand(m,1)
y = 4+3*x+np.random.randn(m,1)
X = np.c_[np.ones((m,1)), x]
theta_linreg = np.linalg.inv(X.T @ X) @ (X.T @ y)
print("Own inversion")
print(theta_linreg)
sgdreg = SGDRegressor(max_iter = 50, penalty=None, eta0=0.1)
sgdreg.fit(x,y.ravel())
print("sgdreg from scikit")
print(sgdreg.intercept_, sgdreg.coef_)
theta = np.random.randn(2,1)
eta = 0.1
Niterations = 1000
for iter in range(Niterations):
gradients = 2.0/m*X.T @ ((X @ theta)-y)
theta -= eta*gradients
print("theta from own gd")
print(theta)
xnew = np.array([[0],[2]])
Xnew = np.c_[np.ones((2,1)), xnew]
ypredict = Xnew.dot(theta)
ypredict2 = Xnew.dot(theta_linreg)
n_epochs = 50
t0, t1 = 5, 50
def learning_schedule(t):
return t0/(t+t1)
theta = np.random.randn(2,1)
for epoch in range(n_epochs):
for i in range(m):
random_index = np.random.randint(m)
xi = X[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T @ ((xi @ theta)-yi)
eta = learning_schedule(epoch*m+i)
theta = theta - eta*gradients
print("theta from own sdg")
print(theta)
plt.plot(xnew, ypredict, "r-")
plt.plot(xnew, ypredict2, "b-")
plt.plot(x, y ,'ro')
plt.axis([0,2.0,0, 15.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Random numbers ')
plt.show()
###Output
_____no_output_____
###Markdown
Work, Energy, Momentum and Conservation lawsIn the previous three chapters we have shown how to use Newton’s laws ofmotion to determine the motion of an object based on the forces actingon it. For two of the cases there is an underlying assumption that we can find an analytical solution to a continuous problem.With a continuous problem we mean a problem where the various variables can take any value within a finite or infinite interval. Unfortunately, in many cases wecannot find an exact solution to the equations of motion we get fromNewton’s second law. The numerical approach, where we discretize the continuous problem, allows us however to study a much richer set of problems.For problems involving Newton's laws and the various equations of motion we encounter, solving the equations numerically, is the standard approach.It allows us to focus on the underlying forces. Often we end up using the same numerical algorithm for different problems.Here we introduce a commonly used technique that allows us to find thevelocity as a function of position without finding the position as afunction of time—an alternate form of Newton’s second law. The methodis based on a simple principle: Instead of solving the equations ofmotion directly, we integrate the equations of motion. Such a methodis called an integration method. This allows us also to introduce the **work-energy** theorem. Thistheorem allows us to find the velocity as a function of position foran object even in cases when we cannot solve the equations ofmotion. This introduces us to the concept of work and kinetic energy,an energy related to the motion of an object.And finally, we will link the work-energy theorem with the principle of conservation of energy. The Work-Energy TheoremLet us define the kinetic energy $K$ with a given velocity $\boldsymbol{v}$ $$K=\frac{1}{2}mv^2,$$ where $m$ is the mass of the object we are considering.We assume also that there is a force $\boldsymbol{F}$ acting on the given object $$\boldsymbol{F}=\boldsymbol{F}(\boldsymbol{r},\boldsymbol{v},t),$$ with $\boldsymbol{r}$ the position and $t$ the time.In general we assume the force is a function of all these variables.Many of the more central forces in Nature however, depende only on theposition. Examples are the gravitational force and the force derivedfrom the Coulomb potential in electromagnetism.Let us study the derivative of the kinetic energy with respect to time $t$. 
Its continuous form is $$\frac{dK}{dt}=\frac{1}{2}m\frac{d\boldsymbol{v}\cdot\boldsymbol{v}}{dt}.$$ Using our results from exercise 3 of homework 1, we can write the derivative of a vector dot product as $$\frac{dK}{dt}=\frac{1}{2}m\frac{d\boldsymbol{v}\cdot\boldsymbol{v}}{dt}= \frac{1}{2}m\left(\frac{d\boldsymbol{v}}{dt}\cdot\boldsymbol{v}+\boldsymbol{v}\cdot\frac{d\boldsymbol{v}}{dt}\right)=m\frac{d\boldsymbol{v}}{dt}\cdot\boldsymbol{v}.$$ We know also that the acceleration is defined as $$\boldsymbol{a}=\frac{\boldsymbol{F}}{m}=\frac{d\boldsymbol{v}}{dt}.$$ We can then rewrite the equation for the derivative of the kinetic energy as $$\frac{dK}{dt}=m\frac{d\boldsymbol{v}}{dt}\boldsymbol{v}=\boldsymbol{F}\frac{d\boldsymbol{r}}{dt},$$ where we defined the velocity as the derivative of the position with respect to time.Let us now discretize the above equation by letting the instantaneous terms be replaced by a discrete quantity, that iswe let $dK\rightarrow \Delta K$, $dt\rightarrow \Delta t$, $d\boldsymbol{r}\rightarrow \Delta \boldsymbol{r}$ and $d\boldsymbol{v}\rightarrow \Delta \boldsymbol{v}$.We have then $$\frac{\Delta K}{\Delta t}=m\frac{\Delta \boldsymbol{v}}{\Delta t}\boldsymbol{v}=\boldsymbol{F}\frac{\Delta \boldsymbol{r}}{\Delta t},$$ or by multiplying out $\Delta t$ we have $$\Delta K=\boldsymbol{F}\Delta \boldsymbol{r}.$$ We define this quantity as the **work** done by the force $\boldsymbol{F}$during the displacement $\Delta \boldsymbol{r}$. If we study the dimensionalityof this problem we have mass times length squared divided by timesquared, or just dimension energy.If we now define a series of such displacements $\Delta\boldsymbol{r}$ we have a difference in kinetic energy at a final position $\boldsymbol{r}_n$ and an initial position $\boldsymbol{r}_0$ given by $$\Delta K=\frac{1}{2}mv_n^2-\frac{1}{2}mv_0^2=\sum_{i=0}^n\boldsymbol{F}_i\Delta \boldsymbol{r},$$ where $\boldsymbol{F}_i$ are the forces acting at every position $\boldsymbol{r}_i$.The work done by acting with a force on a set of displacements canthen be as expressed as the difference between the initial and finalkinetic energies.This defines the **work-energy** theorem.If we take the limit $\Delta \boldsymbol{r}\rightarrow 0$, we can rewrite the sum over the various displacements in terms of an integral, that is $$\Delta K=\frac{1}{2}mv_n^2-\frac{1}{2}mv_0^2=\sum_{i=0}^n\boldsymbol{F}_i\Delta \boldsymbol{r}\rightarrow \int_{\boldsymbol{r}_0}^{\boldsymbol{r}_n}\boldsymbol{F}(\boldsymbol{r},\boldsymbol{v},t)d\boldsymbol{r}.$$ This integral defines a path integral since it will depend on the given path we take between the two end points. We will replace the limits with the symbol $c$ in order to indicate that we take a specific countour in space when the force acts on the system. That is the work $W_{n0}$ between two points $\boldsymbol{r}_n$ and $\boldsymbol{r}_0$ is labeled as $$W_{n0}=\frac{1}{2}mv_n^2-\frac{1}{2}mv_0^2=\int_{c}\boldsymbol{F}(\boldsymbol{r},\boldsymbol{v},t)d\boldsymbol{r}.$$ Note that if the force is perpendicular to the displacement, then the force does not affect the kinetic energy.Let us now study some examples of forces and how to find the velocity from the integration over a given path.Thereafter we study how to evaluate an integral numerically.In order to study the work- energy, we will normally need to performa numerical integration, unless we can integrate analytically. 
Here wepresent some of the simpler methods such as the **rectangle** rule, the **trapezoidal** rule and higher-order methods like the Simpson family of methods. Example of an Electron moving along a SurfaceAs an example, let us consider the following case.We have classical electron which moves in the $x$-direction along a surface. The force from the surface is $$\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_1.$$ The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.Using the work-energy theorem we can find the work $W$ done when moving an electron from a position $x_0$ to a final position $x$ through the integral $$W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',$$ which results in $$W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].$$ If we now use the work-energy theorem we can find the the velocity at a final position $x$ by setting upthe differences in kinetic energies between the final position and the initial position $x_0$.We have that the work done by the force is given by the difference in kinetic energies as $$W=\frac{1}{2}m\left(v^2(x)-v^2(x_0)\right)=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right],$$ and labeling $v(x_0)=v_0$ (and assuming we know the initial velocity) we have $$v(x)=\pm \sqrt{v_0^2+\frac{F_0b}{m\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]},$$ Choosing $x_0=0$m and $v_0=0$m/s we can simplify the above equation to $$v(x)=\pm \sqrt{\frac{F_0b}{m\pi}\left[\cos{(\frac{2\pi x}{b})}-1\right]},$$ Harmonic OscillationsAnother well-known force (and we will derive when we come to HarmonicOscillations) is the case of a sliding block attached to a wallthrough a spring. The block is attached to a spring with springconstant $k$. The other end of the spring is attached to the wall atthe origin $x=0$. We assume the spring has an equilibrium length$L_0$.The force $F_x$ from the spring on the block is then $$F_x=-k(x-L_0).$$ The position $x$ where the spring force is zero is called the equilibrium position. In our case this is$x=L_0$.We can now compute the work done by this force when we move our block from an initial position $x_0$ to a final position $x$ $$W=\int_{x_0}^{x}F_xdx'=-k\int_{x_0}^{x}(x'-L_0)dx'=\frac{1}{2}k(x_0-L_0)^2-\frac{1}{2}k(x-L_0)^2.$$ If we now bring back the definition of the work-energy theorem in terms of the kinetic energy we have $$W=\frac{1}{2}mv^2(x)-\frac{1}{2}mv_0^2=\frac{1}{2}k(x_0-L_0)^2-\frac{1}{2}k(x-L_0)^2,$$ which we rewrite as $$\frac{1}{2}mv^2(x)+\frac{1}{2}k(x-L_0)^2=\frac{1}{2}mv_0^2+\frac{1}{2}k(x_0-L_0)^2.$$ What does this mean? The total energy, which is the sum of potential and kinetic energy, is conserved.Wow, this sounds interesting. We will analyze this next week in more detail when we study energy, momentum and angular momentum conservation. Numerical IntegrationLet us now see how we could have solved the above integral numerically.There are several numerical algorithms for finding an integralnumerically. The more familiar ones like the rectangular rule or thetrapezoidal rule have simple geometric interpretations.Let us look at the mathematical details of what are called equal-step methods, also known as Newton-Cotes quadrature. Newton-Cotes Quadrature or equal-step methodsThe integral $$\begin{equation} I=\int_a^bf(x) dx\label{eq:integraldef} \tag{1}\end{equation}$$ has a very simple meaning. 
The integral is thearea enscribed by the function $f(x)$ starting from $x=a$ to $x=b$. It is subdivided in several smaller areas whose evaluation is to be approximated by different techniques. The areas under the curve can for example be approximated by rectangular boxes or trapezoids.In considering equal step methods, our basic approach is that of approximatinga function $f(x)$ with a polynomial of at most degree $N-1$, given $N$ integration points. If our polynomial is of degree $1$,the function will be approximated with $f(x)\approx a_0+a_1x$. The algorithm for these integration methods is rather simple, and the number of approximations perhaps unlimited!* Choose a step size $h=(b-a)/N$ where $N$ is the number of steps and $a$ and $b$ the lower and upper limits of integration.* With a given step length we rewrite the integral as $$\int_a^bf(x) dx= \int_a^{a+h}f(x)dx + \int_{a+h}^{a+2h}f(x)dx+\dots \int_{b-h}^{b}f(x)dx.$$ * The strategy then is to find a reliable polynomial approximation for $f(x)$ in the various intervals. Choosing a given approximation for $f(x)$, we obtain a specific approximation to the integral.* With this approximation to $f(x)$ we perform the integration by computing the integrals over all subintervals.One possible strategy then is to find a reliable polynomial expansion for $f(x)$ in the smallersubintervals. Consider for example evaluating $$\int_a^{a+2h}f(x)dx,$$ which we rewrite as $$\begin{equation}\int_a^{a+2h}f(x)dx=\int_{x_0-h}^{x_0+h}f(x)dx.\label{eq:hhint} \tag{2}\end{equation}$$ We have chosen a midpoint $x_0$ and have defined $x_0=a+h$. The rectangle methodA very simple approach is the so-called midpoint or rectangle method.In this case the integration area is split in a given number of rectangles with length $h$ and height given by the mid-point value of the function. This gives the following simple rule for approximating an integral $$\begin{equation}I=\int_a^bf(x) dx \approx h\sum_{i=1}^N f(x_{i-1/2}), \label{eq:rectangle} \tag{3}\end{equation}$$ where $f(x_{i-1/2})$ is the midpoint value of $f$ for a given rectangle. We will discuss its truncation error below. It is easy to implement this algorithm, as shown belowThe correct mathematical expression for the local error for the rectangular rule $R_i(h)$ for element $i$ is $$\int_{-h}^hf(x)dx - R_i(h)=-\frac{h^3}{24}f^{(2)}(\xi),$$ and the global error reads $$\int_a^bf(x)dx -R_h(f)=-\frac{b-a}{24}h^2f^{(2)}(\xi),$$ where $R_h$ is the result obtained with rectangular rule and $\xi \in [a,b]$.We go back to our simple example above and set $F_0=b=1$ and choose $x_0=0$ and $x=1/2$, and have $$W=\frac{1}{\pi}.$$ The code here computes the integral using the rectangle rule and $n=100$ integration points we have a relative error of$10^{-5}$.
###Code
from math import sin, pi
import numpy as np
from sympy import Symbol, integrate
# function for the Rectangular rule
def Rectangular(a,b,f,n):
h = (b-a)/float(n)
s = 0
for i in range(0,n,1):
x = (i+0.5)*h
s = s+ f(x)
return h*s
# function to integrate
def function(x):
return sin(2*pi*x)
# define integration limits and integration points
a = 0.0; b = 0.5;
n = 100
Exact = 1./pi
print("Relative error= ", abs( (Rectangular(a,b,function,n)-Exact)/Exact))
###Output
_____no_output_____
###Markdown
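As a small cross-check of the value used for **Exact** above, we can let SymPy evaluate the same integral symbolically; this is a minimal sketch with the same choice $F_0=b=1$, $x_0=0$ and $x=1/2$ as in the text.
###Code
import sympy as sp
# Symbolic evaluation of the work integral with F0 = b = 1, x0 = 0 and x = 1/2
x = sp.Symbol('x')
W = sp.integrate(sp.sin(2*sp.pi*x), (x, 0, sp.Rational(1, 2)))
print(W)        # 1/pi
print(sp.N(W))  # numerical value, about 0.318
###Output
_____no_output_____
###Markdown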
The trapezoidal ruleThe other integral gives $$\int_{x_0-h}^{x_0}f(x)dx=\frac{h}{2}\left(f(x_0) + f(x_0-h)\right)+O(h^3),$$ and adding up we obtain $$\begin{equation} \int_{x_0-h}^{x_0+h}f(x)dx=\frac{h}{2}\left(f(x_0+h) + 2f(x_0) + f(x_0-h)\right)+O(h^3),\label{eq:trapez} \tag{4}\end{equation}$$ which is the well-known trapezoidal rule. Concerning the error in the approximation made,$O(h^3)=O((b-a)^3/N^3)$, you should note that this is the local error. Since we are splitting the integral from$a$ to $b$ in $N$ pieces, we will have to perform approximately $N$ such operations.This means that the *global error* goes like $\approx O(h^2)$. The trapezoidal reads then $$\begin{equation} I=\int_a^bf(x) dx=h\left(f(a)/2 + f(a+h) +f(a+2h)+ \dots +f(b-h)+ f_{b}/2\right),\label{eq:trapez1} \tag{5}\end{equation}$$ with a global error which goes like $O(h^2)$. Hereafter we use the shorthand notations $f_{-h}=f(x_0-h)$, $f_{0}=f(x_0)$and $f_{h}=f(x_0+h)$.The correct mathematical expression for the local error for the trapezoidal rule is $$\int_a^bf(x)dx -\frac{b-a}{2}\left[f(a)+f(b)\right]=-\frac{h^3}{12}f^{(2)}(\xi),$$ and the global error reads $$\int_a^bf(x)dx -T_h(f)=-\frac{b-a}{12}h^2f^{(2)}(\xi),$$ where $T_h$ is the trapezoidal result and $\xi \in [a,b]$.The trapezoidal rule is easy to implement numerically through the following simple algorithm * Choose the number of mesh points and fix the step length. * calculate $f(a)$ and $f(b)$ and multiply with $h/2$. * Perform a loop over $n=1$ to $n-1$ ($f(a)$ and $f(b)$ are known) and sum up the terms $f(a+h) +f(a+2h)+f(a+3h)+\dots +f(b-h)$. Each step in the loop corresponds to a given value $a+nh$. * Multiply the final result by $h$ and add $hf(a)/2$ and $hf(b)/2$.We use the same function and integrate now using the trapoezoidal rule.
###Code
from math import sin, pi
import numpy as np
from sympy import Symbol, integrate
# function for the trapezoidal rule
def Trapez(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function to integrate
def function(x):
return sin(2*pi*x)
# define integration limits and integration points
a = 0.0; b = 0.5;
n = 100
Exact = 1./pi
print("Relative error= ", abs( (Trapez(a,b,function,n)-Exact)/Exact))
###Output
_____no_output_____
###Markdown
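To see the quoted global error $O(h^2)$ in practice, the small sketch below (assuming the **Trapez** function and the integrand defined in the cell above have been run) doubles the number of integration points a few times; the relative error should then drop by roughly a factor of four each time.
###Code
from math import sin, pi
# Empirical check of the O(h^2) global error of the trapezoidal rule:
# doubling n (halving h) should reduce the relative error by roughly a factor 4.
# Assumes Trapez(a,b,f,n) and function(x) from the cell above are defined.
a = 0.0; b = 0.5
Exact = 1./pi
previous = None
for n in [10, 20, 40, 80, 160]:
    error = abs((Trapez(a,b,function,n)-Exact)/Exact)
    if previous is None:
        print(n, error)
    else:
        print(n, error, " error ratio:", previous/error)
    previous = error
###Output
_____no_output_____
###Markdown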
Simpsons' ruleInstead of using the above first-order polynomials approximations for $f$, we attempt at using a second-order polynomials.In this case we need three points in order to define a second-order polynomial approximation $$f(x) \approx P_2(x)=a_0+a_1x+a_2x^2.$$ Using again Lagrange's interpolation formula we have $$P_2(x)=\frac{(x-x_0)(x-x_1)}{(x_2-x_0)(x_2-x_1)}y_2+ \frac{(x-x_0)(x-x_2)}{(x_1-x_0)(x_1-x_2)}y_1+ \frac{(x-x_1)(x-x_2)}{(x_0-x_1)(x_0-x_2)}y_0.$$ Inserting this formula in the integral of Eq. ([2](eq:hhint)) we obtain $$\int_{-h}^{+h}f(x)dx=\frac{h}{3}\left(f_h + 4f_0 + f_{-h}\right)+O(h^5),$$ which is Simpson's rule. Note that the improved accuracy in the evaluation ofthe derivatives gives a better error approximation, $O(h^5)$ vs.\ $O(h^3)$ .But this is again the *local error approximation*. Using Simpson's rule we can easily computethe integral of Eq. ([1](eq:integraldef)) to be $$\begin{equation} I=\int_a^bf(x) dx=\frac{h}{3}\left(f(a) + 4f(a+h) +2f(a+2h)+ \dots +4f(b-h)+ f_{b}\right),\label{eq:simpson} \tag{6}\end{equation}$$ with a global error which goes like $O(h^4)$. More formal expressions for the local and global errors are for the local error $$\int_a^bf(x)dx -\frac{b-a}{6}\left[f(a)+4f((a+b)/2)+f(b)\right]=-\frac{h^5}{90}f^{(4)}(\xi),$$ and for the global error $$\int_a^bf(x)dx -S_h(f)=-\frac{b-a}{180}h^4f^{(4)}(\xi).$$ with $\xi\in[a,b]$ and $S_h$ the results obtained with Simpson's method.The method can easily be implemented numerically through the following simple algorithm * Choose the number of mesh points and fix the step. * calculate $f(a)$ and $f(b)$ * Perform a loop over $n=1$ to $n-1$ ($f(a)$ and $f(b)$ are known) and sum up the terms $4f(a+h) +2f(a+2h)+4f(a+3h)+\dots +4f(b-h)$. Each step in the loop corresponds to a given value $a+nh$. Odd values of $n$ give $4$ as factor while even values yield $2$ as factor. * Multiply the final result by $\frac{h}{3}$.
###Code
from math import sin, pi
import numpy as np
from sympy import Symbol, integrate
# function for Simpson's rule (the number of steps n must be even)
def Simpson(a,b,f,n):
    h = (b-a)/float(n)
    sum = f(a)
    for i in range(1,n):
        sum = sum + f(a+i*h)*(3+(-1)**(i+1))
    sum = sum + f(b)
return sum*h/3.0
# function to integrate
def function(x):
return sin(2*pi*x)
# define integration limits and integration points
a = 0.0; b = 0.5;
n = 100
Exact = 1./pi
print("Relative error= ", abs( (Simpson(a,b,function,n)-Exact)/Exact))
###Output
_____no_output_____
###Markdown
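To make the comparison explicit, the short sketch below evaluates all three rules on the same integral with $n=100$ points; it assumes the **Rectangular**, **Trapez** and **Simpson** functions from the cells above have been defined in the notebook session.
###Code
from math import sin, pi
# Compare the three Newton-Cotes rules on the same integral with n = 100
# (assumes Rectangular, Trapez, Simpson and function from the cells above)
a = 0.0; b = 0.5
n = 100
Exact = 1./pi
for name, rule in [("Rectangle", Rectangular), ("Trapezoidal", Trapez), ("Simpson", Simpson)]:
    print(name, abs((rule(a, b, function, n) - Exact) / Exact))
###Output
_____no_output_____
###Markdown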
We see that Simpson's rule gives a much better estimate of the relative error with the same number of points as we used for the rectangle rule and the trapezoidal rule. Symbolic integrationWe could also use symbolic mathematics. Here Python comes to our rescue with [SymPy](https://www.sympy.org/en/index.html), which is a Python library for symbolic mathematics.Here's an example of how you could use **SymPy**, where we compare the symbolic calculation with an integration of a function $f(x)$ by the trapezoidal rule. We show an example code that evaluates the integral $\int_0^1 x^2 dx = 1/3$. The following code for the trapezoidal rule allows you to plot the relative error by comparing with the exact result. By increasing the number of integration points one eventually reaches a region where numerical round-off errors start to accumulate.
###Code
%matplotlib inline
from math import log10
import numpy as np
from sympy import Symbol, integrate
import matplotlib.pyplot as plt
# function for the trapezoidal rule
def Trapez(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function to integrate: f(x) = x*x, with exact integral 1/3 on [0,1]
def function(x):
return x*x
# define integration limits
a = 0.0; b = 1.0;
# find result from sympy
# define x as a symbol to be used by sympy
x = Symbol('x')
exact = integrate(function(x), (x, a, b))
# set up the arrays for plotting the relative error
n = np.zeros(7); y = np.zeros(7);
# find the relative error as function of integration points
for i in range(1, 8, 1):
npts = 10**i
result = Trapez(a,b,function,npts)
RelativeError = abs((exact-result)/exact)
    n[i-1] = log10(npts); y[i-1] = log10(RelativeError);
plt.plot(n,y, 'ro')
plt.xlabel('n')
plt.ylabel('Relative error')
plt.show()
###Output
_____no_output_____
###Markdown
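A side remark, tied to the error formulas above: Simpson's local and global errors involve the fourth derivative of the integrand, so for $f(x)=x^2$ (where $f^{(4)}=0$) Simpson's rule reproduces the exact value $1/3$ up to round-off even with very few points. The sketch below assumes the **Simpson** function defined earlier in this notebook.
###Code
# Simpson's error formulas involve the fourth derivative f^(4);
# for f(x) = x^2 this vanishes, so Simpson's rule reproduces 1/3 up to round-off
# even with very few points. Assumes the Simpson function defined earlier.
def quadratic(x):
    return x*x
a = 0.0; b = 1.0
exact = 1.0/3.0
for npts in [2, 10, 100]:
    print(npts, abs(Simpson(a,b,quadratic,npts)-exact))
###Output
_____no_output_____
###Markdown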
Energy ConservationEnergy conservation is most convenient as a strategy for addressingproblems where time does not appear. For example, a particle goesfrom position $x_0$ with speed $v_0$, to position $x_f$; what is itsnew speed? However, it can also be applied to problems where timedoes appear, such as in solving for the trajectory $x(t)$, orequivalently $t(x)$.Energy is conserved in the case where the potential energy, $V(\boldsymbol{r})$, depends only on position, and not on time. The force is determined by $V$, $$\begin{equation}\boldsymbol{F}(\boldsymbol{r})=-\boldsymbol{\nabla} V(\boldsymbol{r}).\label{_auto1} \tag{7}\end{equation}$$ We say a force is conservative if it satisfies the following conditions:1. The force $\boldsymbol{F}$ acting on an object only depends on the position $\boldsymbol{r}$, that is $\boldsymbol{F}=\boldsymbol{F}(\boldsymbol{r})$.2. For any two points $\boldsymbol{r}_1$ and $\boldsymbol{r}_2$, the work done by the force $\boldsymbol{F}$ on the displacement between these two points is independent of the path taken.3. Finally, the **curl** of the force is zero $\boldsymbol{\nabla}\times\boldsymbol{F}=0$.The energy $E$ of a given system is defined as the sum of kinetic and potential energies, $$E=K+V(\boldsymbol{r}).$$ We define the potential energy at a point $\boldsymbol{r}$ as the negative work done from a starting point $\boldsymbol{r}_0$ to a final point $\boldsymbol{r}$ $$V(\boldsymbol{r})=-W(\boldsymbol{r}_0\rightarrow\boldsymbol{r})= -\int_{\boldsymbol{r}_0}^{\boldsymbol{r}}d\boldsymbol{r}'\boldsymbol{F}(\boldsymbol{r}').$$ If the potential depends on the path taken between these two points there is no unique potential.As an example, let us study a classical electron which moves in the $x$-direction along a surface. The force from the surface is $$\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_1.$$ The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.This is indeed a conservative force since it depends only on positionand its **curl** is zero, that is $-\boldsymbol{\nabla}\times \boldsymbol{F}=0$. This means that energy is conserved and theintegral over the work done by the force is independent of the pathtaken. Using the work-energy theorem we can find the work $W$ done whenmoving an electron from a position $x_0$ to a final position $x$through the integral $$W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',$$ which results in $$W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].$$ Since this is related to the change in kinetic energy we have, with $v_0$ being the initial velocity at a time $t_0$, $$v = \pm\sqrt{\frac{2}{m}\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+v_0^2}.$$ The potential energy, due to energy conservation is $$V(x)=V(x_0)+\frac{1}{2}mv_0^2-\frac{1}{2}mv^2,$$ with $v$ given by the velocity from above.We can now, in order to find a more explicit expression for thepotential energy at a given value $x$, define a zero level value forthe potential. The potential is defined, using the work-energytheorem, as $$V(x)=V(x_0)+\int_{x_0}^x (-F(x'))dx',$$ and if you recall the definition of the indefinite integral, we can rewrite this as $$V(x)=\int (-F(x'))dx'+C,$$ where $C$ is an undefined constant. The force is defined as thegradient of the potential, and in that case the undefined constantvanishes. 
The constant does not affect the force we derive from thepotential.We have then $$V(x)=V(x_0)-\int_{x_0}^x \boldsymbol{F}(x')dx',$$ which results in $$V(x)=-\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+V(x_0).$$ We can now define $$-\frac{F_0b}{2\pi}\cos{(\frac{2\pi x_0}{b})}=V(x_0),$$ which gives $$V(x)=-\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}\right].$$ We have defined work as the energy resulting from a net force actingon an object (or sseveral objects), that is $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})= \boldsymbol{F}(\boldsymbol{r})d\boldsymbol{r}.$$ If we write out this for each component we have $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=\boldsymbol{F}(\boldsymbol{r})d\boldsymbol{r}=F_xdx+F_ydy+F_zdz.$$ The work done from an initial position to a final one defines also the difference in potential energies $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=-\left[V(\boldsymbol{r}+d\boldsymbol{r})-V(\boldsymbol{r})\right].$$ We can write out the differences in potential energies as $$V(\boldsymbol{r}+d\boldsymbol{r})-V(\boldsymbol{r})=V(x+dx,y+dy,z+dz)-V(x,y,z)=dV,$$ and using the expression the differential of a multi-variable function $f(x,y,z)$ $$df=\frac{\partial f}{\partial x}dx+\frac{\partial f}{\partial y}dy+\frac{\partial f}{\partial z}dz,$$ we can write the expression for the work done as $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=-dV=-\left[\frac{\partial V}{\partial x}dx+\frac{\partial V}{\partial y}dy+\frac{\partial V}{\partial z}dz \right].$$ Comparing the last equation with $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=F_xdx+F_ydy+F_zdz,$$ we have $$F_xdx+F_ydy+F_zdz=-\left[\frac{\partial V}{\partial x}dx+\frac{\partial V}{\partial y}dy+\frac{\partial V}{\partial z}dz \right],$$ leading to $$F_x=-\frac{\partial V}{\partial x},$$ and $$F_y=-\frac{\partial V}{\partial y},$$ and $$F_z=-\frac{\partial V}{\partial z},$$ or just $$\boldsymbol{F}=-\frac{\partial V}{\partial x}\boldsymbol{e}_1-\frac{\partial V}{\partial y}\boldsymbol{e}_2-\frac{\partial V}{\partial z}\boldsymbol{e}_3=-\boldsymbol{\nabla}V(\boldsymbol{r}).$$ And this connection is the one we wanted to show. 
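To make the connection $\boldsymbol{F}=-\boldsymbol{\nabla}V$ concrete for the surface example above, we can let SymPy differentiate the potential we just derived and check that we recover the force. This is a small illustrative sketch with the symbols $F_0$ and $b$ kept general.
###Code
import sympy as sp
# Check that F(x) = -dV/dx reproduces the force for the surface potential
x, F0, b = sp.symbols('x F_0 b', positive=True)
V = -F0*b/(2*sp.pi)*sp.cos(2*sp.pi*x/b)
F = -sp.diff(V, x)
print(sp.simplify(F))   # should give -F_0*sin(2*pi*x/b)
###Output
_____no_output_____
###Markdown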
Net EnergyThe net energy, $E=V+K$ where $K$ is the kinetic energy, is then conserved, $$\begin{eqnarray}\frac{d}{dt}(K+V)&=&\frac{d}{dt}\left(\frac{m}{2}(v_x^2+v_y^2+v_z^2)+V(\boldsymbol{r})\right)\\\nonumber&=&m\left(v_x\frac{dv_x}{dt}+v_y\frac{dv_y}{dt}+v_z\frac{dv_z}{dt}\right)+\partial_xV\frac{dx}{dt}+\partial_yV\frac{dy}{dt}+\partial_zV\frac{dz}{dt}\\\nonumber&=&v_xF_x+v_yF_y+v_zF_z-F_xv_x-F_yv_y-F_zv_z=0.\end{eqnarray}$$ The same proof can be written more compactly with vector notation, $$\begin{eqnarray}\frac{d}{dt}\left(\frac{m}{2}v^2+V(\boldsymbol{r})\right)&=&m\boldsymbol{v}\cdot\dot{\boldsymbol{v}}+\boldsymbol{\nabla} V(\boldsymbol{r})\cdot\dot{\boldsymbol{r}}\\\nonumber&=&\boldsymbol{v}\cdot\boldsymbol{F}-\boldsymbol{F}\cdot\boldsymbol{v}=0.\end{eqnarray}$$ Inverting the expression for the kinetic energy, $$\begin{equation}v=\sqrt{2K/m}=\sqrt{2(E-V)/m},\label{_auto2} \tag{8}\end{equation}$$ allows one to solve for the one-dimensional trajectory $x(t)$ by finding $t(x)$, $$\begin{equation}t=\int_{x_0}^x \frac{dx'}{v(x')}=\int_{x_0}^x\frac{dx'}{\sqrt{2(E-V(x'))/m}}.\label{_auto3} \tag{9}\end{equation}$$ Note that this would be much more difficult in higher dimensions, because you would have to determine which points $x,y,z$ the particle might reach in the trajectory, whereas in one dimension you can typically tell by simply checking whether the kinetic energy is positive at every point between the old position and the new position. The Earth-Sun systemWe will now venture into the study of a system which is energy conserving. The aim is to see if (since it is not possible to solve the general equations analytically) we can develop stable numerical algorithms whose results we can trust! We solve the equations of motion numerically. We will also compute quantities like the energy numerically.We start with a simpler case first, the Earth-Sun system in two dimensions only. The gravitational force $F_G$ on Earth from the Sun is $$\boldsymbol{F}_G=-\frac{GM_{\odot}M_E}{r^3}\boldsymbol{r},$$ where $G$ is the gravitational constant, $$M_E=6\times 10^{24}\mathrm{kg}$$ is the mass of Earth, $$M_{\odot}=2\times 10^{30}\mathrm{kg}$$ is the mass of the Sun and $$r=1.5\times 10^{11}\mathrm{m}$$ is the distance between Earth and the Sun. The latter defines what we call an astronomical unit **AU**.From Newton's second law we have then for the $x$ direction $$\frac{d^2x}{dt^2}=\frac{F_{x}}{M_E},$$ and $$\frac{d^2y}{dt^2}=\frac{F_{y}}{M_E}$$ for the $y$ direction.Here we will use that $x=r\cos{(\theta)}$, $y=r\sin{(\theta)}$ and $$r = \sqrt{x^2+y^2}.$$ We can then write the force components as $$F_{x}=-\frac{GM_{\odot}M_E}{r^2}\cos{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}x,$$ and $$F_{y}=-\frac{GM_{\odot}M_E}{r^2}\sin{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}y.$$ These two second-order equations can be rewritten as four first-order coupled differential equations $$\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,$$ and $$\frac{dx}{dt}=v_x,$$ and $$\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,$$ and $$\frac{dy}{dt}=v_y.$$ The four coupled differential equations can be turned into dimensionless equations, or we can introduce astronomical units with $1$ AU = $1.5\times 10^{11}$ m.
Using the equations from circular motion (with $r =1\,\mathrm{AU}$) $$\frac{M_E v^2}{r} = F = \frac{GM_{\odot}M_E}{r^2},$$ we have $$GM_{\odot}=v^2r,$$ and using that the velocity of Earth (assuming circular motion) is $v = 2\pi r/\mathrm{yr}=2\pi\,\mathrm{AU}/\mathrm{yr}$, we have $$GM_{\odot}= v^2r = 4\pi^2 \frac{(\mathrm{AU})^3}{\mathrm{yr}^2}.$$ The four coupled differential equations can then be discretized using Euler's method as (with step length $h$) $$v_{x,i+1}=v_{x,i}-h\frac{4\pi^2}{r_i^3}x_i,$$ and $$x_{i+1}=x_i+hv_{x,i},$$ and $$v_{y,i+1}=v_{y,i}-h\frac{4\pi^2}{r_i^3}y_i,$$ and $$y_{i+1}=y_i+hv_{y,i}.$$ The code here implements Euler's method for the Earth-Sun system using a more compact way of representing the vectors. Alternatively, you could have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ as one-dimensional arrays.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
DeltaT = 0.001
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
r[i+1] = r[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('y[AU]')
ax.set_xlabel('x[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
###Output
_____no_output_____
###Markdown
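One way to quantify the problem discussed below is to monitor the total energy per unit mass, $E/M_E=\frac{1}{2}v^2-4\pi^2/r$, along the Euler trajectory we just produced. The sketch assumes the arrays **r**, **v** and **t** from the cell above are still in memory.
###Code
# Total energy per unit Earth mass, E/M_E = v^2/2 - 4*pi^2/r, along the Euler run.
# Assumes the arrays r, v and t from the cell above are still in memory.
import numpy as np
import matplotlib.pyplot as plt
rnorm = np.sqrt(np.sum(r*r, axis=1))
energy = 0.5*np.sum(v*v, axis=1) - 4*np.pi**2/rnorm
fig, ax = plt.subplots()
ax.set_xlabel('t[yr]')
ax.set_ylabel('E/M_E [AU^2/yr^2]')
ax.plot(t, energy)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown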
We notice here that Euler's method doesn't give a stable orbit. It means that we cannot trust Euler's method. In a deeper way, as we will see in homework 5, Euler's method does not conserve energy. It is an example of an integrator which is not [symplectic](https://en.wikipedia.org/wiki/Symplectic_integrator). Here we thus present two methods which, with simple changes, allow us to avoid these pitfalls. The simplest possible extension is the so-called Euler-Cromer method. The changes we need to make to our code are indeed marginal here. We simply need to replace
###Code
r[i+1] = r[i] + DeltaT*v[i]
###Output
_____no_output_____
###Markdown
in the above code with the velocity at the new time $t_{i+1}$
###Code
r[i+1] = r[i] + DeltaT*v[i+1]
###Output
_____no_output_____
###Markdown
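For completeness, here is a minimal sketch of the full Euler-Cromer loop (using the same setup as the Euler cell above); the only real difference is that the velocity is updated first and the new velocity is then used to advance the position.
###Code
# Euler-Cromer for the Earth-Sun system: the only change from plain Euler is that
# the velocity is updated first and the *new* velocity is used to advance the position.
from math import ceil, sqrt, pi
import numpy as np
import matplotlib.pyplot as plt
DeltaT = 0.001
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
r[0] = np.array([1.0,0.0])
v[0] = np.array([0.0,2*pi])
Fourpi2 = 4*pi*pi
for i in range(n-1):
    rabs = sqrt(sum(r[i]*r[i]))
    a = -Fourpi2*r[i]/(rabs**3)
    v[i+1] = v[i] + DeltaT*a
    r[i+1] = r[i] + DeltaT*v[i+1]   # the updated velocity is used here
    t[i+1] = t[i] + DeltaT
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown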
By this simple caveat we get stable orbits.Below we derive the Euler-Cromer method as well as one of the most utlized algorithms for sovling the above type of problems, the so-called Velocity-Verlet method. Let us repeat Euler's method.We have a differential equation $$\begin{equation}y'(t_i)=f(t_i,y_i) \label{_auto4} \tag{10}\end{equation}$$ and if we truncate at the first derivative, we have from the Taylor expansion $$\begin{equation}y_{i+1}=y(t_i) + (\Delta t) f(t_i,y_i) + O(\Delta t^2), \label{eq:euler} \tag{11}\end{equation}$$ which when complemented with $t_{i+1}=t_i+\Delta t$ formsthe algorithm for the well-known Euler method. Note that at every step we make an approximation errorof the order of $O(\Delta t^2)$, however the total error is the sum over allsteps $N=(b-a)/(\Delta t)$ for $t\in [a,b]$, yielding thus a global error which goes like$NO(\Delta t^2)\approx O(\Delta t)$. To make Euler's method more precise we can obviouslydecrease $\Delta t$ (increase $N$), but this can lead to loss of numerical precision.Euler's method is not recommended for precision calculation,although it is handy to use in order to get a firstview on how a solution may look like.Euler's method is asymmetric in time, since it uses information about the derivative at the beginningof the time interval. This means that we evaluate the position at $y_1$ using the velocityat $v_0$. A simple variation is to determine $x_{n+1}$ using the velocity at$v_{n+1}$, that is (in a slightly more generalized form) $$\begin{equation} y_{n+1}=y_{n}+ v_{n+1}+O(\Delta t^2)\label{_auto5} \tag{12}\end{equation}$$ and $$\begin{equation}v_{n+1}=v_{n}+(\Delta t) a_{n}+O(\Delta t^2).\label{_auto6} \tag{13}\end{equation}$$ The acceleration $a_n$ is a function of $a_n(y_n, v_n, t_n)$ and needs to be evaluatedas well. This is the Euler-Cromer method. Deriving the Velocity-Verlet MethodLet us stay with $x$ (position) and $v$ (velocity) as the quantities we are interested in.We have the Taylor expansion for the position given by $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_i+O((\Delta t)^3).$$ The corresponding expansion for the velocity is $$v_{i+1} = v_i+(\Delta t)a_i+\frac{(\Delta t)^2}{2}v^{(2)}_i+O((\Delta t)^3).$$ Via Newton's second law we have normally an analytical expression for the derivative of the velocity, namely $$a_i= \frac{d^2 x}{dt^2}\vert_{i}=\frac{d v}{dt}\vert_{i}= \frac{F(x_i,v_i,t_i)}{m}.$$ If we add to this the corresponding expansion for the derivative of the velocity $$v^{(1)}_{i+1} = a_{i+1}= a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2)=a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2),$$ and retain only terms up to the second derivative of the velocity since our error goes as $O(h^3)$, we have $$(\Delta t)v^{(2)}_i\approx a_{i+1}-a_i.$$ We can then rewrite the Taylor expansion for the velocity as $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left( a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Our final equations for the position and the velocity become then $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),$$ and $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Note well that the term $a_{i+1}$ depends on the position at $x_{i+1}$. This means that you need to calculate the position at the updated time $t_{i+1}$ before the computing the next velocity. Note also that the derivative of the velocity at the time$t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well. 
We can now easily add the Verlet method to our original code as
###Code
DeltaT = 0.01
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
# Set up forces, air resistance FD, note now that we need the norm of the vecto
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('y[AU]')
ax.set_xlabel('x[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunVV")
plt.show()
###Output
_____no_output_____
###Markdown
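As a counterpart to the energy check we did for Euler's method, the small sketch below (assuming the arrays **r** and **v** from the Velocity-Verlet cell above are still defined) monitors the energy per unit mass along the Verlet trajectory; it should stay essentially constant, in contrast to the drift seen with Euler's method.
###Code
# Energy per unit Earth mass along the Velocity-Verlet trajectory.
# Assumes r and v from the Velocity-Verlet cell above are still defined.
import numpy as np
rnorm = np.sqrt(np.sum(r*r, axis=1))
energy = 0.5*np.sum(v*v, axis=1) - 4*np.pi**2/rnorm
print("initial E/M_E:", energy[0])
print("max deviation from initial value:", np.max(np.abs(energy - energy[0])))
###Output
_____no_output_____
###Markdown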
You can easily generalize the calculation of the forces by defining a functionwhich takes in as input the various variables. We leave this as a challenge to you. Additional Material: Link between Line Integrals and Conservative forcesThe concept of line integrals plays an important role in our discussion of energy conservation,our definition of potentials and conservative forces.Let us remind ourselves of some the basic elements (most of you mayhave seen this in a calculus course under the general topic of vectorfields).We define a path integration $C$, that is we integratefrom a point $\boldsymbol{r}_1$ to a point $\boldsymbol{r}_2$. Let us assume that the path $C$ is represented by an arc length $s$. In three dimension we have the following representation of $C$ $$\boldsymbol{r}(s)=x(s)\boldsymbol{e}_1+y(s)\boldsymbol{e}_2+z(s)\boldsymbol{e}_3,$$ then our integral of a function $f(x,y,z)$ along the path $C$ is defined as $$\int_Cf(x,y,z)ds=\int_a^bf\left(x(s),y(s),z(s)\right)ds,$$ where the initial and final points are $a$ and $b$, respectively.With the definition of a line integral, we can in tunrn set up thetheorem of independence of integration path.Let us define$f(x,y,z)$, $g(x,y,z)$ and $h(x,y,z)$ to be functions which aredefined and continuous in a domain $D$ in space. Then a line integrallike the above is said to be independent of path in $D$, if for everypair of endpoints $a$ and $b$ in $D$ the value of the integral is thesame for all paths $C$ in $D$ starting from a point $a$ and ending ina point $b$. The integral depends thus only on the integration limitsand not on the path.An expression of the form $$fdx+gdy+hdz,$$ where $f$, $g$ and $h$ are functions defined in $D$, is a called a first-order differential formin three variables.The form is said to be exact if it is the differential $$du= \frac{\partial u}{\partial x}dx+\frac{\partial u}{\partial y}dy+\frac{\partial u}{\partial z}dz,$$ of a differentiable function $u(x,y,z)$ everywhere in $D$, that is $$du=fdx+gdy+hdz.$$ It is said to be exact if and only if we can then set $$f=\frac{\partial u}{\partial x},$$ and $$g=\frac{\partial u}{\partial y},$$ and $$h=\frac{\partial u}{\partial z},$$ everywhere in the domain $D$.In vector language the above means that the differential form $$fdx+gdy+hdz,$$ is exact in $D$ if and only if the vector function (it could be a force, or velocity, acceleration or other vectors we encounter in this course) $$\boldsymbol{F}=f\boldsymbol{e}_1+g\boldsymbol{e}_2+h\boldsymbol{e}_3,$$ is the gradient of a function $u(x,y,z)$ $$\boldsymbol{v}=\boldsymbol{\nabla}u=\frac{\partial u}{\partial x}\boldsymbol{e}_1+\frac{\partial u}{\partial y}\boldsymbol{e}_2+\frac{\partial u}{\partial z}\boldsymbol{e}_3.$$ If this is the case, we can state the path independence theorem whichstates that with functions $f(x,y,z)$, $g(x,y,z)$ and $h(x,y,z)$ that fulfill the aboveexactness conditions, the line integral $$\int_C\left(fdx+gdy+hdz\right),$$ is independent of path in $D$ if and only if the differential form under the integral sign is exact in $D$.This is the path independence theorem. We will not give a proof of the theorem. 
You can find this in any vector analysis chapter in a mathematics textbook.We note however that the path integral from a point $p$ to a final point $q$ is given by $$\int_p^q\left(fdx+gdy+hdz\right)=\int_p^q\left(\frac{\partial u}{\partial x}dx+\frac{\partial u}{\partial y}dy+\frac{\partial u}{\partial z}dz\right)=\int_p^qdu.$$ Assume now that we have a dependence on a variable $s$ for $x$, $y$ and $z$. We have then $$\int_p^qdu=\int_{s_1}^{s_2}\frac{du}{ds}ds = u(x(s),y(s),z(s))\vert_{s=s_1}^{s=s_2}=u(q)-u(p).$$ This last equation $$\int_p^q\left(fdx+gdy+hdz\right)=u(q)-u(p),$$ is the analogue of the usual formula $$\int_a^bf(x)dx=F(x)\vert_a^b=F(b)-F(a),$$ with $F'(x)=f(x)$.We remember that a the work done by a force$\boldsymbol{F}=f\boldsymbol{e}_1+g\boldsymbol{e}_2+h\boldsymbol{e}_3$ on a displacemnt $d\boldsymbol{r}$is $$W=\int_C\boldsymbol{F}d\boldsymbol{r}=\int_C(fdx+gdy+hdz).$$ From the path independence theorem, we know that this has to result inthe difference between the two endpoints only. This is exact if andonly if the force is the force $\boldsymbol{F}$ is the gradient of a scalarfunction $u$. We call this scalar function, which depends only thepositions $x,y,z$ for the potential energy $V(x,y,z)=V(\boldsymbol{r})$.We have thus $$\boldsymbol{F}(\boldsymbol{r})\propto \boldsymbol{\nabla}V(\boldsymbol{r}),$$ and we define this as $$\boldsymbol{F}(\boldsymbol{r})= -\boldsymbol{\nabla}V(\boldsymbol{r}).$$ Such a force is called **a conservative force**. The above expression can be used to demonstrateenergy conservation.Finally we can define the criterion for exactness and independence ofpath. This theorem states that if $f(x,y,z)$, $g(x,y,z)$ and$h(x,y,z)$ are continuous functions with continuous first partial derivatives in the domain $D$,then the line integral $$\int_C\left(fdx+gdy+hdz\right),$$ is independent of path in $D$ when $$\frac{\partial h}{\partial y}=\frac{\partial g}{\partial z},$$ and $$\frac{\partial f}{\partial z}=\frac{\partial h}{\partial x},$$ and $$\frac{\partial g}{\partial x}=\frac{\partial f}{\partial y}.$$ This leads to the **curl** of $\boldsymbol{F}$ being zero $$\boldsymbol{\nabla}\times\boldsymbol{F}=\boldsymbol{\nabla}\times\left(-\boldsymbol{\nabla}V(\boldsymbol{r})\right)=0!$$ A conservative force $\boldsymbol{F}$ is a defined as the partial derivative of a scalar potential which depends only on the position, $$\boldsymbol{F}(\boldsymbol{r})= -\boldsymbol{\nabla}V(\boldsymbol{r}).$$ This leads to conservation of energy and a path independent line integral as long as the curl of the force is zero, that is $$\boldsymbol{\nabla}\times\boldsymbol{F}=\boldsymbol{\nabla}\times\left(-\boldsymbol{\nabla}V(\boldsymbol{r})\right)=0.$$ Exercises Exercise: Conservation laws, Energy and momentumHow do we define a conservative force?A conservative force is a force whose property is that the total workdone in moving an object between two points is independent of thetaken path. This means that the work on an object under the influenceof a conservative force, is independent on the path of the object. Itdepends only on the spatial degrees of freedom and it is possible toassign a numerical value for the potential at any point. It leads toconservation of energy. 
The gravitational force is an example of aconservative force.If you wish to read more about conservative forces or not, Feyman's lectures from 1963 are quite interesting.He states for example that **All fundamental forces in nature appear to be conservative**.This statement was made while developing his argument that *there are no nonconservative forces*.You may enjoy the link to [Feynman's lecture](http://www.feynmanlectures.caltech.edu/I_14.html).An important condition for the final work to be independent of the path is that the **curl** of the force is zero, thatis $$\boldsymbol{\nabla} \times \boldsymbol{F}=0$$ Use the work-energy theorem to show that energy is conserved with a conservative force.The work-energy theorem states that the work done $W$ by a force $\boldsymbol{F}$ that moves an object from a position $\boldsymbol{r}_0$ to a new position $\boldsymbol{r}_1$ $$W=\int_{\boldsymbol{r}_0}^{\boldsymbol{r}_1}\boldsymbol{F}\boldsymbol{dr}=\frac{1}{2}mv_1^2-\frac{1}{2}mv_0^2,$$ where $v_1^2$ is the velocity squared at a time $t_1$ and $v_0^2$ the corresponding quantity at a time $t_0$.The work done is thus the difference in kinetic energies. We can rewrite the above equation as $$\frac{1}{2}mv_1^2=\int_{\boldsymbol{r}_0}^{\boldsymbol{r}_1}\boldsymbol{F}\boldsymbol{dr}+\frac{1}{2}mv_0^2,$$ that is the final kinetic energy is equal to the initial kinetic energy plus the work done by the force over a given path from a position $\boldsymbol{r}_0$ at time $t_0$ to a final position position $\boldsymbol{r}_1$ at a later time $t_1$.Assume that you have only internal two-body forces acting on $N$ objects in an isolated system. The force from object $i$ on object $j$ is $\boldsymbol{f}_{ij}$. Show that the linear momentum is conserved.Here we use Newton's third law and assume that our system is onlyaffected by so-called internal forces. This means that the force$\boldsymbol{f}_{ij}$ from object $i$ acting on object $j$ is equal to theforce acting on object $j$ from object $i$ but with opposite sign,that is $\boldsymbol{f}_{ij}=-\boldsymbol{f}_{ji}$.The total linear momentum is defined as $$\boldsymbol{P}=\sum_{i=1}^N\boldsymbol{p}_i=\sum_{i=1}^Nm_i\boldsymbol{v}_i,$$ where $i$ runs over all objects, $m_i$ is the mass of object $i$ and $\boldsymbol{v}_i$ its corresponding velocity.The force acting on object $i$ from all the other objects is (lowercase letters for individual objects and upper case letters for totalquantities) $$\boldsymbol{f}_i=\sum_{j=1}^N\boldsymbol{f}_{ji}.$$ Summing over all objects the net force is $$\sum_{i=1}^N\boldsymbol{f}_i=\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}.$$ We are summing freely over all objects with the constraint that $i\ne j$ (no self-interactions). We can now manipulate the double sum as $$\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{f}_{ji}+\boldsymbol{f}_{ij}).$$ Convince yourself about this by setting $N=2$ and $N=3$. 
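One way to convince yourself is a small numerical check: the sketch below builds a random set of pair forces for $N=3$ objects, imposes Newton's third law, and verifies that summing over all $i\ne j$ gives the same (vanishing) result as summing over pairs with $j>i$.
###Code
import numpy as np
# Small numerical check of the double-sum rearrangement for N = 3 objects.
# f[i, j] denotes the force from object i on object j; Newton's third law is imposed.
N = 3
np.random.seed(1)
f = np.random.randn(N, N, 3)
f = f - np.transpose(f, (1, 0, 2))   # enforce f[i, j] = -f[j, i]
for i in range(N):
    f[i, i] = 0.0                    # no self-interaction
full_sum = sum(f[j, i] for i in range(N) for j in range(N) if j != i)
pair_sum = sum(f[j, i] + f[i, j] for i in range(N) for j in range(i+1, N))
print(full_sum)   # equals pair_sum, numerically zero
print(pair_sum)
###Output
_____no_output_____
###Markdown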
Nweton's third law says$\boldsymbol{f}_{ij}=-\boldsymbol{f}_{ji}$, which means we have $$\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{f}_{ji}-\boldsymbol{f}_{ji})=0.$$ The total force due to internal degrees of freedom only is thus $0$.If we then use the definition that $$\sum_{i=1}^N\boldsymbol{f}_i=\sum_{i=1}^Nm_i\frac{d\boldsymbol{v}_i}{dt}=\sum_{i=1}^N\frac{d\boldsymbol{p}_i}{dt}=\frac{d \boldsymbol{P}}{dt}=0,$$ where we assumed that $m_i$ is independent of time, we see that time derivative of the total momentum is zero.We say then that the linear momentum is a constant of the motion. It is conserved. Exercise: Conservation of angular momentum* Define angular momentum and the torque for a single object with external forces only. The angular moment $\boldsymbol{l}_i$ for a given object $i$ is defined as $$\boldsymbol{l}_i = \boldsymbol{r}_i \times \boldsymbol{p}_i,$$ where $\boldsymbol{p}_i=m_i\boldsymbol{v}_i$. With external forces only defining the acceleration and the mass being time independent, the momentum is the integral over the external force as function of time, that is $$\boldsymbol{p}_i(t)=\boldsymbol{p}_i(t_0)+\int_{t_0}^t \boldsymbol{f}_i^{\mathrm{ext}}(t')dt'.$$ The torque for one object is $$\boldsymbol{\tau}_i=\frac{d\boldsymbol{l}_i}{dt} = \frac{dt(\boldsymbol{r}_i \times \boldsymbol{p}_i)}{dt}=\boldsymbol{r}_i \times \frac{d\boldsymbol{p}_i}{dt}=\boldsymbol{r}_i \times \boldsymbol{f}_i,$$ * Define angular momentum and the torque for a system with $N$ objects/particles with external and internal forces. The force from object $i$ on object $j$ is $\boldsymbol{F}_{ij}$.The total angular momentum $\boldsymbol{L}$ is defined as $$\boldsymbol{L}=\sum_{i=1}^N\boldsymbol{l}_i = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{p}_i.$$ and the total torque is (using the expression for one object from 2a) $$\boldsymbol{\tau}=\sum_{i=1}^N\frac{d\boldsymbol{l}_i}{dt} = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{f}_i.$$ The force acting on one object is $\boldsymbol{f}_i=\boldsymbol{f}_i^{\mathrm{ext}}+\sum_{j=1}^N\boldsymbol{f}_{ji}$.* With internal forces only, what is the mathematical form of the forces that allows for angular momentum to be conserved? Using the results from 1c, we can rewrite without external forces our torque as $$\boldsymbol{\tau}=\sum_{i=1}^N\frac{\boldsymbol{l}_i}{dt} = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{f}_i=\sum_{i=1}^N(\boldsymbol{r}_i \times \sum_{j=1}^N\boldsymbol{f}_{ji}),$$ which gives $$\boldsymbol{\tau}=\sum_{i=1}^N\sum_{j=1;j\ne i}^N(\boldsymbol{r}_i \times \boldsymbol{f}_{ji}).$$ We can rewrite this as (convince yourself again about this) $$\boldsymbol{\tau}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{r}_i \times \boldsymbol{f}_{ji}+\boldsymbol{r}_j \times \boldsymbol{f}_{ij}),$$ and using Newton's third law we have $$\boldsymbol{\tau}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{r}_i -\boldsymbol{r}_j) \times \boldsymbol{f}_{ji}.$$ If the force is proportional to $\boldsymbol{r}_i -\boldsymbol{r}_j$ then angular momentum is conserved since the cross-product of a vector with itself is zero. We say thus that angular momentum is a constant of the motion. Exsercise: Example of potentialConsider a particle of mass $m$ moving according to the potential $$V(x,y,z)=A\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ * Is energy conserved? If so, why? In this exercise $A$ and $a$ are constants. 
The force is given by the derivative of $V$ with respect to the spatial degrees of freedom and since the potential depends only on position, the force is conservative and energy is conserved. Furthermore, the curl of the force is zero. To see this we need first to compute the derivatives of the potential with respect to $x$, $y$ and $z$.We have that $$F_x = -\frac{\partial V}{\partial x}=-\frac{xA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\},$$ and $$F_y = 0,$$ and $$F_z = -\frac{\partial V}{\partial z}=-\frac{zA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ The components of the **curl** of $\boldsymbol{F}$ are $$(\boldsymbol{\nabla}\times\boldsymbol{F})_x = \frac{\partial F_y}{\partial z}-\frac{\partial F_z}{\partial y}=0,$$ and $$(\boldsymbol{\nabla}\times\boldsymbol{F})_y = \frac{\partial F_x}{\partial z}-\frac{\partial F_z}{\partial x}=\frac{xzA}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{xzA}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}=0,$$ and $$(\boldsymbol{\nabla}\times\boldsymbol{F})_z = \frac{\partial F_y}{\partial x}-\frac{\partial F_x}{\partial y}=0.$$ The force is a conservative one.* Which of the quantities, $p_x,p_y,p_z$ are conserved?Taking the derivatives with respect to time shows that only $p_y$ is conservedWe see this directly from the above expressions for the force, since the derivative with respect to time of the momentum is simply the force. Thus, only the $y$-component of the momentum is conserved, see the expressions above for the forces,For the next exercise, we also need the following derivatives $$\frac{\partial F_x}{\partial x} = \frac{x^2A}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{A}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\},$$ and $$\frac{\partial F_y}{\partial y} = 0,$$ and $$\frac{\partial F_z}{\partial z} = \frac{z^2A}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{A}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\},$$ * Which of the quantities, $L_x,L_y,L_z$ are conserved?Using that $\boldsymbol{L}=\boldsymbol{r}\times\boldsymbol{p}$ and that $$\frac{d\boldsymbol{L}}{dt}=\boldsymbol{r}\times\boldsymbol{F},$$ we have that the different components are $$(\boldsymbol{r}\times\boldsymbol{F})_x = zF_y-yF_z=\frac{yzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ and $$(\boldsymbol{r}\times\boldsymbol{F})_y = xF_z-zF_x=-\frac{xzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}+\frac{xzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}=0,$$ and $$(\boldsymbol{r}\times\boldsymbol{F})_z = xF_y-yF_x=\frac{yxA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ Only $L_y$ is conserved. Exercise: forces and potentialsA particle of mass $m$ has velocity $v=\alpha/x$, where $x$ is its displacement.* Find the force $F(x)$ responsible for the motion.Here, since the force is assumed to be conservative (only dependence on $x$), we can use energy conservation.Assuming that the total energy at $t=0$ is $E_0$, we have $$E_0=V(x)+\frac{1}{2}mv^2=V(x)+\frac{1}{2}m\frac{\alpha^2}{x^2}.$$ Taking the derivative wrt $x$ we have $$\frac{dV}{dx}-m\frac{\alpha^2}{x^3}=0,$$ and since $F(x)=-dV/dx$ we have $$F(x)=-m\frac{\alpha^2}{x^3}.$$ A particle is thereafter under the influence of a force $F=-kx+kx^3/\alpha^2$, where $k$ and $\alpha$ are constants and $k$ is positive.* Determine $V(x)$ and discuss the motion. It can be convenient here to make a sketch/plot of the potential as function of $x$.We assume that the potential is zero at say $x=0$. 
Integrating the force from zero to $x$ gives $$V(x) = -\int_0^x F(x')dx'=\frac{kx^2}{2}-\frac{kx^4}{4\alpha^2}.$$ The following code plots the potential. We have chosen values of $\alpha=k=1.0$. Feel free to experiment with other values. We plot $V(x)$ for a domain of $x\in [-2,2]$.
###Code
import numpy as np
import matplotlib.pyplot as plt
import math
x0= -2.0
xn = 2.1
Deltax = 0.1
alpha = 1.0
k = 1.0
#set up arrays
x = np.arange(x0,xn,Deltax)
n = np.size(x)
V = np.zeros(n)
V = 0.5*k*x*x-0.25*k*(x**4)/(alpha*alpha)
plt.plot(x, V)
plt.xlabel("x")
plt.ylabel("V")
plt.show()
###Output
_____no_output_____
###Markdown
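To pin down the barrier discussed next, we can let SymPy locate the extrema of $V(x)=kx^2/2-kx^4/(4\alpha^2)$; a small sketch with $k$ and $\alpha$ kept symbolic:
###Code
import sympy as sp
# Locate the extrema of V(x) = k*x**2/2 - k*x**4/(4*alpha**2) symbolically.
x = sp.Symbol('x', real=True)
k, alpha = sp.symbols('k alpha', positive=True)
V = k*x**2/2 - k*x**4/(4*alpha**2)
extrema = sp.solve(sp.diff(V, x), x)
print(extrema)                                          # x = 0 and x = +/- alpha
print([sp.simplify(V.subs(x, xe)) for xe in extrema])   # V = 0 at x = 0, k*alpha**2/4 at x = +/- alpha
###Output
_____no_output_____
###Markdown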
From the plot here (with the chosen parameters) 1. we see that with a given initial velocity we can overcome the potential energy barrierand leave the potential well for good.1. If the initial velocity is smaller (see next exercise) than a certain value, it will remain trapped in the potential well and oscillate back and forth around $x=0$. This is where the potential has its minimum value. 2. If the kinetic energy at $x=0$ equals the maximum potential energy, the object will oscillate back and forth between the minimum potential energy at $x=0$ and the turning points where the kinetic energy turns zero. These are the so-called non-equilibrium points. * What happens when the energy of the particle is $E=(1/4)k\alpha^2$? Hint: what is the maximum value of the potential energy?From the figure we see thatthe potential has a minimum at at $x=0$ then rises until $x=\alpha$ before falling off again. The maximumpotential, $V(x\pm \alpha) = k\alpha^2/4$. If the energy is higher, the particle cannot be contained in thewell. The turning points are thus defined by $x=\pm \alpha$. And from the previous plot you can easily see that this is the case ($\alpha=1$ in the abovementioned Python code). Exercise: Work-energy theorem and conservation lawsThis exercise was partly discussed above. We will study a classical electron which moves in the $x$-direction along a surface. The force from the surface is $$\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_x.$$ The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.* Is this a conservative force? And if so, what does that imply?This is indeed a conservative force since it depends only on position and its **curl** is zero. This means that energy is conserved and the integral over the work done by the force is independent of the path taken. * Use the work-energy theorem to find the velocity $v(x)$. Using the work-energy theorem we can find the work $W$ done when moving an electron from a position $x_\0$ to a final position $x$ through the integral $$W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',$$ which results in $$W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].$$ Since this is related to the change in kinetic energy we have, with $v_0$ being the initial velocity at a time $t_0$, $$v = \pm\sqrt{\frac{2}{m}\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+v_0^2}.$$ * With the above expression for the force, find the potential energy.The potential energy, due to energy conservation is $$V(x)=V(x_0)+\frac{1}{2}mv_0^2-\frac{1}{2}mv^2,$$ with $v$ given by the previous answer. We can now, in order to find a more explicit expression for the potential energy at a given value $x$, define a zero level value for the potential. The potential is defined , using the work-energy theorem , as $$V(x)=V(x_0)+\int_{x_0}^x (-F(x'))dx',$$ and if you recall the definition of the indefinite integral, we can rewrite this as $$V(x)=\int (-F(x'))dx'+C,$$ where $C$ is an undefined constant. The force is defined as the gradient of the potential, and in that case the undefined constant vanishes. 
The constant does not affect the force we derive from the potential.We have then $$V(x)=V(x_0)-\int_{x_0}^x \boldsymbol{F}(x')dx',$$ which results in $$V(x)=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+V(x_0).$$ We can now define $$\frac{F_0b}{2\pi}\cos{(\frac{2\pi x_0}{b})}=V(x_0),$$ which gives $$V(x)=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}\right].$$ * Make a plot of the potential energy and discuss the equilibrium points where the force on the electron is zero. Discuss the physical interpretation of stable and unstable equilibrium points. Use energy conservation. The following Python code plots the potential
###Code
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
Deltax = 0.01
#set up arrays
xinitial = -2.0
xfinal = 2.0
n = ceil((xfinal-xinitial)/Deltax)
x = np.zeros(n)
for i in range(n):
x[i] = xinitial+i*Deltax
V = np.zeros(n)
# Setting values for the constants.
F0 = 1.0; b = 1.0;
# Defining the potential
V = F0*b/(2*pi)*np.cos(2*pi*x/b)
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('V')
ax.set_xlabel('x')
ax.plot(x, V)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
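To back up the discussion of equilibrium points below, the small sketch here classifies the critical points of the plotted potential (with $F_0=b=1$ as in the code above) by the sign of the curvature $d^2V/dx^2$.
###Code
import numpy as np
# Classify the equilibrium points of V(x) = F0*b/(2*pi)*cos(2*pi*x/b) with F0 = b = 1.
# dV/dx vanishes at x = n/2; the sign of d^2V/dx^2 decides stability.
F0 = 1.0; b = 1.0
for xe in np.arange(-2.0, 2.5, 0.5):
    curvature = -(2*np.pi/b)**2 * F0*b/(2*np.pi)*np.cos(2*np.pi*xe/b)
    kind = "stable (minimum of V)" if curvature > 0 else "unstable (maximum of V)"
    print(f"x = {xe:4.1f} : {kind}")
###Output
_____no_output_____
###Markdown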
We have stable equilibrium points for every minimum of the $\cos$ function and unstable equilibrium points where it has its maximimum values. At the minimum the particle has the lowest potential energy and the largest kinetic energy whereas at the maxima it has the largest potential energy and lowest kinetic energy. Exsercise: Rocket, Momentum and massTaylor exercise 3.11. Consider the rocket of mass $M$ moving with velocity $v$. After abrief instant, the velocity of the rocket is $v+\Delta v$ and the massis $M-\Delta M$. Momentum conservation gives $$\begin{eqnarray*}Mv&=&(M-\Delta M)(v+\Delta v)+\Delta M(v-v_e)\\0&=&-\Delta Mv+M\Delta v+\Delta M(v-v_e),\\0&=&M\Delta v-\Delta Mv_e.\end{eqnarray*}$$ In the second step we ignored the term $\Delta M\Delta v$ since weassume it is small. The last equation gives $$\begin{eqnarray}\Delta v&=&\frac{v_e}{M}\Delta M,\\\nonumber\frac{dv}{dt}&=&\frac{v_e}{M}\frac{dM}{dt}.\end{eqnarray}$$ Here we let $\Delta v\rightarrow dv$ and $\Delta M\rightarrow dM$.We have also assumed that $M(t) = M_0-kt$. Integrating the expression with lower limits $v_0=0$ and $M_0$, one finds $$\begin{eqnarray*}v&=&v_e\int_{M_0}^M \frac{dM'}{M'}\\v&=&v_e\ln(M/M_0)\\&=&v_e\ln[(M_0-k t)/M_0].\end{eqnarray*}$$ We have ignored gravity here. If we add gravity as the external force, we get when integrating an additional terms $-gt$, that is $$v=v_e\ln[(M_0-k t)/M_0]-gt.$$ Inserting numbers $v_e=3000$ m/s, $M_0/M=2$ and $g=9.8$ m/s$^{2}$, we find $v=900$ m/s. With $g=0$ the corresponding number is $2100$ m/s, so gravity reduces the speed acquired in the first two minutes to a little less than half its weight-free value.If the thrust $\Delta Mv_e$ is less than the weight $mg$, the rocket will just sit on the ground until it has shed enough mass that the thrust can overcome the weight, definitely not a good design. Exercise: More RocketsThis is a continuation of the previous exercise and most of the relevant background material can be found in Taylor chapter 3.2. Taking the velocity from the previous exercise and integrating over time we find the height $$y(t) = y(t_0=0)+\int_0^tv(t')dt',$$ which gives $$y(t) = v_et\ln{M_0}-v_e\int_0^t \ln{M(t')}dt'-\frac{1}{2}gt^2.$$ To do the integral over time we recall that $M(t')=M_0-\Delta M t'$. We assumed that $\Delta M=k$ is a constant.We use that $M_0-M=kt$ and assume that mass decreases by a constant $k$ times time $t$.We obtain then that the integral gives $$\int_0^t \ln{M(t')}dt' = \int_0^t \ln{(M_0-kt')}dt',$$ and defining the variable $u=M_0-kt'$, with $du=-kdt'$ and the new limits $M_0$ when $t=0$ and $M_0-kt$ when time is equal to $t$, we have $$\int_0^t \ln{M(t')}dt' = \int_0^t \ln{(M_0-kt')}dt'=-\frac{1}{k}\int_{M_0}^{M_0-kt} \ln{(u)}du=-\frac{1}{k}\left[u\ln{(u)}-u\right]_{M_0}^{M_0-kt},$$ and writing out we obtain $$-\frac{1}{k}\left[u\ln{(u)}-u\right]_{M_0}^{M_0-kt} = \frac{1}{k}\left(M_0\ln{M_0}-M\ln{M}\right)-t,$$ Mulitplying with $-v_e$ we have $$-\frac{v_e}{k}\left(M_0\ln{M_0}-M\ln{M}\right)+v_et,$$ which we can rewrite as, using $M_0=M+kt$, $$-\frac{v_e}{k}\left((M+kt)\ln{M_0}-M\ln{M}\right)+v_et=v_et-v_et\ln{M_0}-\frac{Mv_e}{k}\ln{(\frac{M_0}{M})}.$$ Inserting into $y(t)$ we obtain then $$y(t) = v_et-\frac{1}{2}gt^2-\frac{Mv_e}{k}\ln{(\frac{M_0}{M})}.$$ Using the numbers from the previous exercise with $t=2$ min we obtain that $y\approx 40$ km.For exercise 3.14 (5pt) we have the equation of motion which reads $Ma=kv_e-bv$ or $$\frac{Mdv}{kv_e-bv}=dt.$$ We have that $dM/dt =-k$ (assumed a constant rate for mass change). 
We can then replace $dt$ by $-dM/k$ and we have $$\frac{kdv}{kv_e-bv}=-\frac{dM}{M}.$$ Integrating gives $$v = \frac{kv_e}{b}\left[1-(\frac{M}{M_0})^{b/k}\right].$$ Exercise: Center of massTaylor exercise 3.20. Here Taylor's chapter 3.3 can be of use. This relation will turn out to be very useful when we discuss systems of many classical particles.The definition of the center of mass for $N$ objects can be written as $$M\boldsymbol{R}=\sum_{i=1}^Nm_i\boldsymbol{r}_i,$$ where $m_i$ and $\boldsymbol{r}_i$ are the masses and positions of object $i$, respectively.Assume now that we have a collection of $N_1$ objects with masses $m_{1i}$ and positions $\boldsymbol{r}_{1i}$with $i=1,\dots,N_1$ and a collection of $N_2$ objects with masses $m_{2j}$ and positions $\boldsymbol{r}_{2j}$with $j=1,\dots,N_2$.The total mass of the two-body system is $M=M_1+M_2=\sum_{i=1}^{N_1}m_{1i}+\sum_{j=1}^{N_2}m_{2j}$. The center of mass position $\boldsymbol{R}$ of the whole system satisfies then $$M\boldsymbol{R}=\sum_{i=1}^{N_1}m_{1i}\boldsymbol{r}_{1i}+\sum_{j=1}^{N_2}m_{2j}\boldsymbol{r}_{2j}=M_1\boldsymbol{R}_1+M_2\boldsymbol{R}_2,$$ where $\boldsymbol{R}_1$ and $\boldsymbol{R}_2$ are the the center of mass positions of the two separate bodies and the second equality follows from our rewritten definition of the center of mass applied to each body separately. This is the required result. Exercise: The Earth-Sun problemWe start with the Earth-Sun system in two dimensions only. The gravitational force $F_G$ on the earth from the sun is $$\boldsymbol{F}_G=-\frac{GM_{\odot}M_E}{r^3}\boldsymbol{r},$$ where $G$ is the gravitational constant, $$M_E=6\times 10^{24}\mathrm{Kg},$$ the mass of Earth, $$M_{\odot}=2\times 10^{30}\mathrm{Kg},$$ the mass of the Sun and $$r=1.5\times 10^{11}\mathrm{m},$$ is the distance between Earth and the Sun. The latter defines what we call an astronomical unit **AU**.From Newton's second law we have then for the $x$ direction $$\frac{d^2x}{dt^2}=-\frac{F_{x}}{M_E},$$ and $$\frac{d^2y}{dt^2}=-\frac{F_{y}}{M_E},$$ for the $y$ direction.Here we will use that $x=r\cos{(\theta)}$, $y=r\sin{(\theta)}$ and $$r = \sqrt{x^2+y^2}.$$ We can rewrite these equations $$F_{x}=-\frac{GM_{\odot}M_E}{r^2}\cos{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}x,$$ and $$F_{y}=-\frac{GM_{\odot}M_E}{r^2}\sin{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}y,$$ as four first-order coupled differential equations $$\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,$$ and $$\frac{dx}{dt}=v_x,$$ and $$\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,$$ and $$\frac{dy}{dt}=v_y.$$ The four coupled differential equations $$\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,$$ and $$\frac{dx}{dt}=v_x,$$ and $$\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,$$ and $$\frac{dy}{dt}=v_y,$$ can be turned into dimensionless equations or we can introduce astronomical units with $1$ AU = $1.5\times 10^{11}$. 
Using the equations from circular motion (with $r =1\mathrm{AU}$) $$\frac{M_E v^2}{r} = F = \frac{GM_{\odot}M_E}{r^2},$$ we have $$GM_{\odot}=v^2r,$$ and using that the velocity of Earth (assuming circular motion) is$v = 2\pi r/\mathrm{yr}=2\pi\mathrm{AU}/\mathrm{yr}$, we have $$GM_{\odot}= v^2r = 4\pi^2 \frac{(\mathrm{AU})^3}{\mathrm{yr}^2}.$$ The four coupled differential equations can then be discretized using Euler's method as (with step length $h$) $$v_{x,i+1}=v_{x,i}-h\frac{4\pi^2}{r_i^3}x_i,$$ and $$x_{i+1}=x_i+hv_{x,i},$$ and $$v_{y,i+1}=v_{y,i}-h\frac{4\pi^2}{r_i^3}y_i,$$ and $$y_{i+1}=y_i+hv_{y,i},$$ The code here implements Euler's method for the Earth-Sun system using a more compact way of representing the vectors. Alternatively, you could have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ as one-dimensional arrays.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
DeltaT = 0.01
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
r[i+1] = r[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
###Output
_____no_output_____
###Markdown
We notice here that Euler's method doesn't give a stable orbit with, for example, $\Delta t =0.01$. It means that we cannot trust Euler's method. Euler's method does not conserve energy. It is an example of an integrator which is not [symplectic](https://en.wikipedia.org/wiki/Symplectic_integrator). Here we thus present two methods which, with simple changes, allow us to avoid these pitfalls. The simplest possible extension is the so-called Euler-Cromer method. The changes we need to make to our code are indeed marginal here. We simply need to replace
###Code
r[i+1] = r[i] + DeltaT*v[i]
###Output
_____no_output_____
###Markdown
in the above code with the velocity at the new time $t_{i+1}$
###Code
r[i+1] = r[i] + DeltaT*v[i+1]
###Output
_____no_output_____
###Markdown
By this simple caveat we get stable orbits. Below we derive theEuler-Cromer method as well as one of the most utlized algorithms forsolving the above type of problems, the so-called Velocity-Verletmethod.Let us repeat Euler's method.We have a differential equation $$\begin{equation} y'(t_i)=f(t_i,y_i) \label{_auto7} \tag{14}\end{equation}$$ and if we truncate at the first derivative, we have from the Taylor expansion $$y_{i+1}=y(t_i) + (\Delta t) f(t_i,y_i) + O(\Delta t^2),$$ which when complemented with $t_{i+1}=t_i+\Delta t$ formsthe algorithm for the well-known Euler method. Note that at every step we make an approximation errorof the order of $O(\Delta t^2)$, however the total error is the sum over allsteps $N=(b-a)/(\Delta t)$ for $t\in [a,b]$, yielding thus a global error which goes like$NO(\Delta t^2)\approx O(\Delta t)$. To make Euler's method more precise we can obviouslydecrease $\Delta t$ (increase $N$), but this can lead to loss of numerical precision.Euler's method is not recommended for precision calculation,although it is handy to use in order to get a firstview on how a solution may look like.Euler's method is asymmetric in time, since it uses information about the derivative at the beginningof the time interval. This means that we evaluate the position at $y_1$ using the velocityat $v_0$. A simple variation is to determine $x_{n+1}$ using the velocity at$v_{n+1}$, that is (in a slightly more generalized form) $$\begin{equation} y_{n+1}=y_{n}+ v_{n+1}+O(\Delta t^2)\label{_auto8} \tag{15}\end{equation}$$ and $$\begin{equation} v_{n+1}=v_{n}+(\Delta t) a_{n}+O(\Delta t^2).\label{_auto9} \tag{16}\end{equation}$$ The acceleration $a_n$ is a function of $a_n(y_n, v_n, t_n)$ and needs to be evaluatedas well. This is the Euler-Cromer method. It is easy to change the above code and see that with the same time step we get stable results.Let us stay with $x$ (position) and $v$ (velocity) as the quantities we are interested in.We have the Taylor expansion for the position given by $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_i+O((\Delta t)^3).$$ The corresponding expansion for the velocity is $$v_{i+1} = v_i+(\Delta t)a_i+\frac{(\Delta t)^2}{2}v^{(2)}_i+O((\Delta t)^3).$$ Via Newton's second law we have normally an analytical expression for the derivative of the velocity, namely $$a_i= \frac{d^2 x}{dt^2}\vert_{i}=\frac{d v}{dt}\vert_{i}= \frac{F(x_i,v_i,t_i)}{m}.$$ If we add to this the corresponding expansion for the derivative of the velocity $$v^{(1)}_{i+1} = a_{i+1}= a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2)=a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2),$$ and retain only terms up to the second derivative of the velocity since our error goes as $O(h^3)$, we have $$(\Delta t)v^{(2)}_i\approx a_{i+1}-a_i.$$ We can then rewrite the Taylor expansion for the velocity as $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left( a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Our final equations for the position and the velocity become then $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),$$ and $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Note well that the term $a_{i+1}$ depends on the position at $x_{i+1}$. This means that you need to calculate the position at the updated time $t_{i+1}$ before the computing the next velocity. Note also that the derivative of the velocity at the time$t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well. 
We can now easily add the Verlet method to our original code as
###Code
DeltaT = 0.01
#set up arrays
tfinal = 10
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the acceleration from the gravitational force; we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunVV")
plt.show()
###Output
_____no_output_____
###Markdown
You can easily generalize the calculation of the forces by defining a function which takes as input the various variables. We leave this as a challenge to you. Running the above code for various time steps we see that the Velocity-Verlet method is stable over a wide range of time steps. We can also play around with different initial conditions in order to find the escape velocity from an orbit around the sun at a distance of one astronomical unit, 1 AU. The theoretical value for the escape velocity is given by $$v = \sqrt{\frac{8\pi^2}{r}},$$ and with $r=1$ AU, this means that the escape velocity is $2\pi\sqrt{2}$ AU/yr. To obtain this we required that the kinetic energy of Earth equals the magnitude of the gravitational potential energy. Setting $$\frac{1}{2}M_{\mathrm{Earth}}v^2=\frac{GM_{\odot}M_{\mathrm{Earth}}}{r},$$ and with $GM_{\odot}=4\pi^2$ we obtain the above relation for the velocity. Setting an initial velocity equal to, say, $9$ AU/yr in the code below yields a planet (Earth) which escapes its stable orbit around the sun.
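As a quick sanity check (a minimal sketch using only the standard library, not part of the original text), the escape velocity $\sqrt{8\pi^2/r}$ at $r=1$ AU evaluates to about $8.89$ AU/yr, so the initial speed of $9$ AU/yr used below indeed exceeds it.
###Code
from math import pi, sqrt
# escape velocity from r = 1 AU with G*M_sun = 4*pi^2 AU^3/yr^2
v_escape = sqrt(8*pi*pi/1.0)   # equals 2*pi*sqrt(2), roughly 8.886 AU/yr
print(v_escape)
###Output
_____no_output_____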
###Code
DeltaT = 0.01
#set up arrays
tfinal = 100
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
# setting initial velocity larger than escape velocity
v0 = np.array([0.0,9.0])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the acceleration from the gravitational force; we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EscapeEarthSunVV")
plt.show()
###Output
_____no_output_____
###Markdown
Exercise: Conservative forces
Which of the following forces are conservative? All three forces depend only on $\boldsymbol{r}$ and satisfy the first condition for being conservative.
* $\boldsymbol{F}=k(x\boldsymbol{i}+2y\boldsymbol{j}+3z\boldsymbol{k})$ where $k$ is a constant. The **curl** is zero and the force is conservative. The potential energy is upon integration $V(x,y,z)=-k(x^2/2+y^2+3z^2/2)$. Taking the derivative shows that this is indeed the case since it gives back the force.
* $\boldsymbol{F}=y\boldsymbol{i}+x\boldsymbol{j}+0\boldsymbol{k}$. This force is also conservative since it depends only on the coordinates and its curl is zero. To find the potential energy, since the integral is path independent, we can choose to integrate along any direction. The simplest is to start from $x=0$ as origin and follow a path along the $x$-axis (which gives zero) and then parallel to the $y$-axis, which results in $V(x,y) = -xy$. Taking the derivatives with respect to $x$ and $y$ gives us back the expression for the force.
* $\boldsymbol{F}=k(-y\boldsymbol{i}+x\boldsymbol{j}+0\boldsymbol{k})$ where $k$ is a constant. Here the **curl** is $(0,0,2k)$ and the force is not conservative.
* For those forces which are conservative, find the corresponding potential energy $V$ and verify by direct differentiation that $\boldsymbol{F}=-\boldsymbol{\nabla} V$. See the answers to each case above.
Exercise: The Lennard-Jones potential
[The Lennard-Jones potential](https://en.wikipedia.org/wiki/Lennard-Jones_potential) is often used to describe the interaction between two atoms, ions or molecules. If you end up doing materials science and molecular dynamics calculations, it is very likely that you will encounter this potential model. The expression for the potential energy is $$V(r) = V_0\left((\frac{a}{r})^{12}-(\frac{b}{r})^{6}\right),$$ where $V_0$, $a$ and $b$ are constants and the potential depends only on the relative distance between two objects $i$ and $j$, that is $r=\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert=\sqrt{(x_i-x_j)^2+(y_i-y_j)^2+(z_i-z_j)^2}$.
* Sketch/plot the potential (choose some values for the constants in doing so). A short symbolic check of the curl statements above is given in the cell right below; after that, the Python code plots the potential.
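The check below is only a sketch: the SymPy helper `curl` and the symbol names are introduced here for illustration and are not part of the original exercise.
###Code
import sympy as sp
x, y, z, k = sp.symbols('x y z k')
def curl(F):
    # curl of a 3D vector field given as a tuple (Fx, Fy, Fz)
    Fx, Fy, Fz = F
    return (sp.diff(Fz, y) - sp.diff(Fy, z),
            sp.diff(Fx, z) - sp.diff(Fz, x),
            sp.diff(Fy, x) - sp.diff(Fx, y))
print(curl((k*x, 2*k*y, 3*k*z)))        # (0, 0, 0): conservative
print(curl((y, x, sp.Integer(0))))       # (0, 0, 0): conservative
print(curl((-k*y, k*x, sp.Integer(0))))  # (0, 0, 2*k): not conservative
###Output
_____no_output_____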
###Code
# Common imports
import numpy as np
from math import *
import matplotlib.pyplot as plt
Deltar = 0.01
#set up arrays
rinitial = 1.8
rfinal = 3.
n = ceil((rfinal-rinitial)/Deltar)
r = np.zeros(n)
for i in range(n):
r[i] = rinitial+i*Deltar
V = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
a = 2.0
b = 2.0
V0 = 10.0
V = V0*((a/r)**(12)-(b/r)**6)
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('V')
ax.set_xlabel('r')
ax.plot(r, V)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
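###Markdown
Before answering the next questions analytically, a two-line numerical check (a sketch reusing the arrays `r`, `V` and the constants `a`, `b` from the cell above) locates the minimum of the plotted potential; it should agree with the analytical value $r=2^{1/6}a^2/b$ derived below.
###Code
# locate the minimum of the tabulated Lennard-Jones potential
imin = np.argmin(V)
print("numerical minimum at r =", r[imin])
print("analytical minimum at r =", 2**(1/6)*a*a/b)
###Output
_____no_output_____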
###Markdown
* Find and classify the equilibrium points. Here there is only one equilibrium point, which we find by taking the derivative of the potential with respect to the relative distance. The derivative with respect to $r$, the relative distance, is $$\frac{dV}{dr} = -6V_0\left(2\frac{a^{12}}{r^{13}}-\frac{b^6}{r^7}\right),$$ and this is zero when $$r = 2^{1/6}\frac{a^2}{b}.$$ If we choose $a=2$ and $b=2$ then $r=2\times 2^{1/6}$. Since the second derivative is positive for all $r$ for our choices of $a$ and $b$ (convince yourself about this), this value of $r$ has to correspond to a minimum of the potential. This agrees with our graph from the figure above (run the code to produce the figure).
* What is the force acting on one of the objects (an atom for example) from the other object? Is this a conservative force? From the previous exercise we have $$\frac{dV}{dr} = -6V_0\left(2\frac{a^{12}}{r^{13}}-\frac{b^6}{r^7}\right).$$ We need the gradient, and since the force on particle $i$ is given by $\boldsymbol{F}_i=-\boldsymbol{\nabla}_i V(\boldsymbol{r}_i-\boldsymbol{r}_j)$, we obtain $$\boldsymbol{F}_i=6V_0\left(2(\frac{a}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^{12}-(\frac{b}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^6\right)\frac{\boldsymbol{r}_i-\boldsymbol{r}_j}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert^2}.$$ Here $r = \vert\vert \boldsymbol{r}_i-\boldsymbol{r}_j\vert \vert$. If we have more than two particles, we need to sum over all other particles $j$. We thus have to introduce a sum over all $N$ particles. The force on particle $i$ at position $\boldsymbol{r}_i$ from all particles $j$ at their positions $\boldsymbol{r}_j$ results in the equation of motion (note that we have divided by the mass $m$ here) $$\boldsymbol{a}_i=\frac{d^2\boldsymbol{r}_i}{dt^2} = \frac{6V_0}{m} \sum_{j \neq i}^{N}\left(2(\frac{a}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^{12}-(\frac{b}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^6\right)\frac{\boldsymbol{r}_i-\boldsymbol{r}_j}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert^2}.$$ This is also a conservative force, with zero **curl** as well.
Exercise: particle in a new potential
Consider a particle of mass $m$ moving in a one-dimensional potential, $$V(x)=-\alpha\frac{x^2}{2}+\beta\frac{x^4}{4}.$$
* Plot the potential and discuss possible equilibrium points. Is this a conservative force? The following Python code gives a plot of the potential.
###Code
# Common imports
import numpy as np
from math import *
import matplotlib.pyplot as plt
Deltax = 0.01
#set up arrays
xinitial = -2.0
xfinal = 2.0
n = ceil((xfinal-xinitial)/Deltax)
x = np.zeros(n)
for i in range(n):
x[i] = xinitial+i*Deltax
V = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
alpha = 0.81
beta = 0.5
print(sqrt(alpha/beta))
V = -alpha*x*x*0.5 + beta*(x**4)*0.25
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_xlabel('x')
ax.set_ylabel('V[s]')
ax.plot(x, V)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
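###Markdown
As a cross-check on the plot (a small sketch reusing `x`, `V`, `alpha` and `beta` from the cell above), the grid minimum of the potential should sit at one of the points $x=\pm\sqrt{\alpha/\beta}$ discussed next.
###Code
# the tabulated minimum should agree with x = +-sqrt(alpha/beta)
imin = np.argmin(V)
print("numerical minimum at x =", x[imin])
print("analytical minima at x = +-", sqrt(alpha/beta))
###Output
_____no_output_____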
###Markdown
Here we have chosen $\alpha=0.81$ and $\beta=0.5$. Taking the derivative of $V$ with respect to $x$ gives two minima (and it is easy to see here that the second derivative is positive) at $x=\pm\sqrt{\alpha/\beta}$ and a maximum at $x=0$. The derivative is $$\frac{dV}{dx} = -\alpha x + \beta x^3,$$ which gives the above values when we require that it equals zero. As we can see from the plot (run the above Python code), we have two so-called stable equilibrium points (where the potential has its minima) and an unstable equilibrium point. The force is conservative since it depends only on $x$ and has a **curl** which is zero.
* Compute the second derivative of the potential and find its minimum position(s). Use the Taylor expansion of the potential around its minimum (see Taylor section 5.1) to define a spring constant $k$. Use the spring constant to find the natural (angular) frequency $\omega_0=\sqrt{k/m}$. We call this new spring constant an effective spring constant.
In the solution to the previous exercise we listed the values where the derivatives of the potential are zero. Taking the second derivative we have that $$\frac{d^2V}{dx^2} = -\alpha + 3\beta x^2,$$ and for $\alpha,\beta > 0$ (we assume they are positive constants) we see that at $x=0$ the second derivative is negative, which means this is a maximum. For $x=\pm\sqrt{\alpha/\beta}$ we see that the second derivative is positive. Thus these points correspond to two minima. Assume now that we Taylor-expand the potential around one of these minima, say $x_{\mathrm{min}}=\sqrt{\alpha/\beta}$. We have thus $$V(x) = V(x_{\mathrm{min}})+(x-x_{\mathrm{min}})\frac{dV}{dx}\vert_{x_{\mathrm{min}}}+\frac{1}{2}(x-x_{\mathrm{min}})^2\frac{d^2V}{dx^2}\vert_{x_{\mathrm{min}}}+\dots$$ Since we are at a point where the first derivative is zero, inserting the value for the second derivative of $V$, keeping only terms up to the second derivative and finally taking the derivative with respect to $x$, we find the expression for the force $$F(x) = -(x-x_{\mathrm{min}})\frac{d^2V}{dx^2}\vert_{x_{\mathrm{min}}},$$ and inserting the expression for the second derivative at the minimum we find $$F(x) = -2\alpha(x-\sqrt{\frac{\alpha}{\beta}}).$$ Thus our effective spring constant is $k=2\alpha$.
* We ignore the second term in the potential energy and keep only the term proportional to the effective spring constant, that is a force $F=-kx$. Find the acceleration and set up the differential equation. Find the general analytical solution for these harmonic oscillations. You don't need to find the constants in the general solution.
Here we simplify our force by shifting the reference point so that $x_{\mathrm{min}}=0$, which gives a force $$F(x) = -kx,$$ with $k=2\alpha$. Defining a natural frequency $\omega_0 = \sqrt{k/m}$, where $m$ is the mass of our particle, we have the following equation of motion $$\frac{d^2x}{dt^2}=-\omega_0^2x,$$ which has the analytical solution $x(t)=A\cos{(\omega_0t)}+B\sin{(\omega_0t)}$ and velocity $v(t)=-\omega_0A\sin{(\omega_0t)}+\omega_0B\cos{(\omega_0t)}$. The initial conditions are used to define $A$ and $B$.
Exercise: Testing Energy conservation
The code here implements Euler's method for the Earth-Sun system using a more compact way of representing the vectors. Alternatively, you could have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ as one-dimensional arrays.
It tests conservation of potential and kinetic energy as functions of time, in addition to the total energy, again as a function of time. **Note**: in all codes we have used scaled equations so that the gravitational constant times the mass of the Sun is given by $4\pi^2$ and the mass of the Earth is set to **one** in the calculations of kinetic and potential energies. Otherwise we would get very large numbers.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
# Initial values, time step, positions and velocites
DeltaT = 0.0001
#set up arrays
tfinal = 100 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# setting up the kinetic, potential and total energy, note only functions of time
EKinetic = np.zeros(n)
EPotential = np.zeros(n)
ETotal = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Setting up variables for the calculation of energies
# distance that defines rabs in potential energy
rabs0 = sqrt(sum(r[0]*r[0]))
# Initial kinetic energy. Note that we skip the mass of the Earth here, that is MassEarth=1 in all codes
EKinetic[0] = 0.5*sum(v0*v0)
# Initial potential energy (note negative sign, why?)
EPotential[0] = -4*pi*pi/rabs0
# Initial total energy
ETotal[0] = EPotential[0]+EKinetic[0]
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update Energies, velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
r[i+1] = r[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
EKinetic[i+1] = 0.5*sum(v[i+1]*v[i+1])
EPotential[i+1] = -4*pi*pi/sqrt(sum(r[i+1]*r[i+1]))
ETotal[i+1] = EPotential[i+1]+EKinetic[i+1]
# Plot energies as functions of time
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, EKinetic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('Kinetic energy')
axs[1].plot(t, EPotential)
axs[1].set_ylabel('Potential Energy')
axs[2].plot(t, ETotal)
axs[2].set_xlabel('Time [yr]')
axs[2].set_ylabel('Total Energy')
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
###Output
_____no_output_____
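###Markdown
A short diagnostic (a sketch reusing the `ETotal` array from the cell above) puts a number on the energy drift discussed next.
###Code
# quantify the drift in total energy for the Euler run above
print("initial total energy:", ETotal[0])
print("final total energy:  ", ETotal[-1])
print("relative drift:      ", abs((ETotal[-1]-ETotal[0])/ETotal[0]))
###Output
_____no_output_____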
###Markdown
We see very clearly that Euler's method does not conserve energy! Try to reduce the time step $\Delta t$. What do you see? With the Euler-Cromer method, the only thing we need is to update the position at time $t_{i+1}$ with the updated velocity from the same time. Thus, the change in the code is extremely simple, and **energy is suddenly conserved**. Note that the error runs like $O(\Delta t)$ and this is why we see the larger oscillations. But within this oscillating energy envelope, the energies swing between a maximum and a minimum value and never exceed these bounds.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
# Initial values, time step, positions and velocites
DeltaT = 0.0001
#set up arrays
tfinal = 100 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# setting up the kinetic, potential and total energy, note only functions of time
EKinetic = np.zeros(n)
EPotential = np.zeros(n)
ETotal = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Setting up variables for the calculation of energies
# distance that defines rabs in potential energy
rabs0 = sqrt(sum(r[0]*r[0]))
# Initial kinetic energy. Note that we skip the mass of the Earth here, that is MassEarth=1 in all codes
EKinetic[0] = 0.5*sum(v0*v0)
# Initial potential energy
EPotential[0] = -4*pi*pi/rabs0
# Initial total energy
ETotal[0] = EPotential[0]+EKinetic[0]
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
# Only change when we add the Euler-Cromer method
r[i+1] = r[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
EKinetic[i+1] = 0.5*sum(v[i+1]*v[i+1])
EPotential[i+1] = -4*pi*pi/sqrt(sum(r[i+1]*r[i+1]))
ETotal[i+1] = EPotential[i+1]+EKinetic[i+1]
# Plot energies as functions of time
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, EKinetic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('Kinetic energy')
axs[1].plot(t, EPotential)
axs[1].set_ylabel('Potential Energy')
axs[2].plot(t, ETotal)
axs[2].set_xlabel('Time [yr]')
axs[2].set_ylabel('Total Energy')
fig.tight_layout()
save_fig("EarthSunEulerCromer")
plt.show()
###Output
_____no_output_____
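###Markdown
To make the statement above quantitative, namely that the Euler-Cromer energy swings between a maximum and a minimum without drifting, a minimal check (reusing `ETotal` from the run above) prints the extreme values and their relative spread.
###Code
# the total energy of the Euler-Cromer run oscillates but stays bounded
print("max total energy:", np.max(ETotal))
print("min total energy:", np.min(ETotal))
print("relative spread :", (np.max(ETotal) - np.min(ETotal))/abs(ETotal[0]))
###Output
_____no_output_____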
###Markdown
Our final equations for the position and the velocity become then $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),$$ and $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Note well that the term $a_{i+1}$ depends on the position at $x_{i+1}$. This means that you need to calculate the position at the updated time $t_{i+1}$ before the computing the next velocity. Note also that the derivative of the velocity at the time$t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well. We can now easily add the Verlet method to our original code as
###Code
DeltaT = 0.001
#set up arrays
tfinal = 100
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# setting up the kinetic, potential and total energy, note only functions of time
EKinetic = np.zeros(n)
EPotential = np.zeros(n)
ETotal = np.zeros(n)
# Setting up variables for the calculation of energies
# distance that defines rabs in potential energy
rabs0 = sqrt(sum(r[0]*r[0]))
# Initial kinetic energy. Note that we skip the mass of the Earth here, that is MassEarth=1 in all codes
EKinetic[0] = 0.5*sum(v0*v0)
# Initial potential energy
EPotential[0] = -4*pi*pi/rabs0
# Initial total energy
ETotal[0] = EPotential[0]+EKinetic[0]
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the acceleration from the gravitational force; we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
EKinetic[i+1] = 0.5*sum(v[i+1]*v[i+1])
EPotential[i+1] = -4*pi*pi/sqrt(sum(r[i+1]*r[i+1]))
ETotal[i+1] = EPotential[i+1]+EKinetic[i+1]
# Plot energies as functions of time
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, EKinetic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('Kinetic energy')
axs[1].plot(t, EPotential)
axs[1].set_ylabel('Potential Energy')
axs[2].plot(t, ETotal)
axs[2].set_xlabel('Time [yr]')
axs[2].set_ylabel('Total Energy')
fig.tight_layout()
save_fig("EarthSunVelocityVerlet")
plt.show()
###Output
_____no_output_____
###Markdown
Work, Energy, Momentum and Conservation lawsIn the previous three chapters we have shown how to use Newton’s laws ofmotion to determine the motion of an object based on the forces actingon it. For two of the cases there is an underlying assumption that we can find an analytical solution to a continuous problem.With a continuous problem we mean a problem where the various variables can take any value within a finite or infinite interval. Unfortunately, in many cases wecannot find an exact solution to the equations of motion we get fromNewton’s second law. The numerical approach, where we discretize the continuous problem, allows us however to study a much richer set of problems.For problems involving Newton's laws and the various equations of motion we encounter, solving the equations numerically, is the standard approach.It allows us to focus on the underlying forces. Often we end up using the same numerical algorithm for different problems.Here we introduce a commonly used technique that allows us to find thevelocity as a function of position without finding the position as afunction of time—an alternate form of Newton’s second law. The methodis based on a simple principle: Instead of solving the equations ofmotion directly, we integrate the equations of motion. Such a methodis called an integration method. This allows us also to introduce the **work-energy** theorem. Thistheorem allows us to find the velocity as a function of position foran object even in cases when we cannot solve the equations ofmotion. This introduces us to the concept of work and kinetic energy,an energy related to the motion of an object.And finally, we will link the work-energy theorem with the principle of conservation of energy. The Work-Energy TheoremLet us define the kinetic energy $K$ with a given velocity $\boldsymbol{v}$ $$K=\frac{1}{2}mv^2,$$ where $m$ is the mass of the object we are considering.We assume also that there is a force $\boldsymbol{F}$ acting on the given object $$\boldsymbol{F}=\boldsymbol{F}(\boldsymbol{r},\boldsymbol{v},t),$$ with $\boldsymbol{r}$ the position and $t$ the time.In general we assume the force is a function of all these variables.Many of the more central forces in Nature however, depende only on theposition. Examples are the gravitational force and the force derivedfrom the Coulomb potential in electromagnetism.Let us study the derivative of the kinetic energy with respect to time $t$. 
Its continuous form is $$\frac{dK}{dt}=\frac{1}{2}m\frac{d\boldsymbol{v}\cdot\boldsymbol{v}}{dt}.$$ Using our results from exercise 3 of homework 1, we can write the derivative of a vector dot product as $$\frac{dK}{dt}=\frac{1}{2}m\frac{d\boldsymbol{v}\cdot\boldsymbol{v}}{dt}= \frac{1}{2}m\left(\frac{d\boldsymbol{v}}{dt}\cdot\boldsymbol{v}+\boldsymbol{v}\cdot\frac{d\boldsymbol{v}}{dt}\right)=m\frac{d\boldsymbol{v}}{dt}\cdot\boldsymbol{v}.$$ We know also that the acceleration is defined as $$\boldsymbol{a}=\frac{\boldsymbol{F}}{m}=\frac{d\boldsymbol{v}}{dt}.$$ We can then rewrite the equation for the derivative of the kinetic energy as $$\frac{dK}{dt}=m\frac{d\boldsymbol{v}}{dt}\boldsymbol{v}=\boldsymbol{F}\frac{d\boldsymbol{r}}{dt},$$ where we defined the velocity as the derivative of the position with respect to time.Let us now discretize the above equation by letting the instantaneous terms be replaced by a discrete quantity, that iswe let $dK\rightarrow \Delta K$, $dt\rightarrow \Delta t$, $d\boldsymbol{r}\rightarrow \Delta \boldsymbol{r}$ and $d\boldsymbol{v}\rightarrow \Delta \boldsymbol{v}$.We have then $$\frac{\Delta K}{\Delta t}=m\frac{\Delta \boldsymbol{v}}{\Delta t}\boldsymbol{v}=\boldsymbol{F}\frac{\Delta \boldsymbol{r}}{\Delta t},$$ or by multiplying out $\Delta t$ we have $$\Delta K=\boldsymbol{F}\Delta \boldsymbol{r}.$$ We define this quantity as the **work** done by the force $\boldsymbol{F}$during the displacement $\Delta \boldsymbol{r}$. If we study the dimensionalityof this problem we have mass times length squared divided by timesquared, or just dimension energy.If we now define a series of such displacements $\Delta\boldsymbol{r}$ we have a difference in kinetic energy at a final position $\boldsymbol{r}_n$ and an initial position $\boldsymbol{r}_0$ given by $$\Delta K=\frac{1}{2}mv_n^2-\frac{1}{2}mv_0^2=\sum_{i=0}^n\boldsymbol{F}_i\Delta \boldsymbol{r},$$ where $\boldsymbol{F}_i$ are the forces acting at every position $\boldsymbol{r}_i$.The work done by acting with a force on a set of displacements canthen be as expressed as the difference between the initial and finalkinetic energies.This defines the **work-energy** theorem.If we take the limit $\Delta \boldsymbol{r}\rightarrow 0$, we can rewrite the sum over the various displacements in terms of an integral, that is $$\Delta K=\frac{1}{2}mv_n^2-\frac{1}{2}mv_0^2=\sum_{i=0}^n\boldsymbol{F}_i\Delta \boldsymbol{r}\rightarrow \int_{\boldsymbol{r}_0}^{\boldsymbol{r}_n}\boldsymbol{F}(\boldsymbol{r},\boldsymbol{v},t)d\boldsymbol{r}.$$ This integral defines a path integral since it will depend on the given path we take between the two end points. We will replace the limits with the symbol $c$ in order to indicate that we take a specific countour in space when the force acts on the system. That is the work $W_{n0}$ between two points $\boldsymbol{r}_n$ and $\boldsymbol{r}_0$ is labeled as $$W_{n0}=\frac{1}{2}mv_n^2-\frac{1}{2}mv_0^2=\int_{c}\boldsymbol{F}(\boldsymbol{r},\boldsymbol{v},t)d\boldsymbol{r}.$$ Note that if the force is perpendicular to the displacement, then the force does not affect the kinetic energy.Let us now study some examples of forces and how to find the velocity from the integration over a given path.Thereafter we study how to evaluate an integral numerically.In order to study the work- energy, we will normally need to performa numerical integration, unless we can integrate analytically. 
Here wepresent some of the simpler methods such as the **rectangle** rule, the **trapezoidal** rule and higher-order methods like the Simpson family of methods. Example of an Electron moving along a SurfaceAs an example, let us consider the following case.We have classical electron which moves in the $x$-direction along a surface. The force from the surface is $$\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_1.$$ The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.Using the work-energy theorem we can find the work $W$ done when moving an electron from a position $x_0$ to a final position $x$ through the integral $$W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',$$ which results in $$W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].$$ If we now use the work-energy theorem we can find the the velocity at a final position $x$ by setting upthe differences in kinetic energies between the final position and the initial position $x_0$.We have that the work done by the force is given by the difference in kinetic energies as $$W=\frac{1}{2}m\left(v^2(x)-v^2(x_0)\right)=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right],$$ and labeling $v(x_0)=v_0$ (and assuming we know the initial velocity) we have $$v(x)=\pm \sqrt{v_0^2+\frac{F_0b}{m\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]},$$ Choosing $x_0=0$m and $v_0=0$m/s we can simplify the above equation to $$v(x)=\pm \sqrt{\frac{F_0b}{m\pi}\left[\cos{(\frac{2\pi x}{b})}-1\right]},$$ Harmonic OscillationsAnother well-known force (and we will derive when we come to HarmonicOscillations) is the case of a sliding block attached to a wallthrough a spring. The block is attached to a spring with springconstant $k$. The other end of the spring is attached to the wall atthe origin $x=0$. We assume the spring has an equilibrium length$L_0$.The force $F_x$ from the spring on the block is then $$F_x=-k(x-L_0).$$ The position $x$ where the spring force is zero is called the equilibrium position. In our case this is$x=L_0$.We can now compute the work done by this force when we move our block from an initial position $x_0$ to a final position $x$ $$W=\int_{x_0}^{x}F_xdx'=-k\int_{x_0}^{x}(x'-L_0)dx'=\frac{1}{2}k(x_0-L_0)^2-\frac{1}{2}k(x-L_0)^2.$$ If we now bring back the definition of the work-energy theorem in terms of the kinetic energy we have $$W=\frac{1}{2}mv^2(x)-\frac{1}{2}mv_0^2=\frac{1}{2}k(x_0-L_0)^2-\frac{1}{2}k(x-L_0)^2,$$ which we rewrite as $$\frac{1}{2}mv^2(x)+\frac{1}{2}k(x-L_0)^2=\frac{1}{2}mv_0^2+\frac{1}{2}k(x_0-L_0)^2.$$ What does this mean? The total energy, which is the sum of potential and kinetic energy, is conserved.Wow, this sounds interesting. We will analyze this next week in more detail when we study energy, momentum and angular momentum conservation. Numerical IntegrationLet us now see how we could have solved the above integral numerically.There are several numerical algorithms for finding an integralnumerically. The more familiar ones like the rectangular rule or thetrapezoidal rule have simple geometric interpretations.Let us look at the mathematical details of what are called equal-step methods, also known as Newton-Cotes quadrature. Newton-Cotes Quadrature or equal-step methodsThe integral $$\begin{equation} I=\int_a^bf(x) dx\label{eq:integraldef} \tag{1}\end{equation}$$ has a very simple meaning. 
The integral is thearea enscribed by the function $f(x)$ starting from $x=a$ to $x=b$. It is subdivided in several smaller areas whose evaluation is to be approximated by different techniques. The areas under the curve can for example be approximated by rectangular boxes or trapezoids.In considering equal step methods, our basic approach is that of approximatinga function $f(x)$ with a polynomial of at most degree $N-1$, given $N$ integration points. If our polynomial is of degree $1$,the function will be approximated with $f(x)\approx a_0+a_1x$. The algorithm for these integration methods is rather simple, and the number of approximations perhaps unlimited!* Choose a step size $h=(b-a)/N$ where $N$ is the number of steps and $a$ and $b$ the lower and upper limits of integration.* With a given step length we rewrite the integral as $$\int_a^bf(x) dx= \int_a^{a+h}f(x)dx + \int_{a+h}^{a+2h}f(x)dx+\dots \int_{b-h}^{b}f(x)dx.$$ * The strategy then is to find a reliable polynomial approximation for $f(x)$ in the various intervals. Choosing a given approximation for $f(x)$, we obtain a specific approximation to the integral.* With this approximation to $f(x)$ we perform the integration by computing the integrals over all subintervals.One possible strategy then is to find a reliable polynomial expansion for $f(x)$ in the smallersubintervals. Consider for example evaluating $$\int_a^{a+2h}f(x)dx,$$ which we rewrite as $$\begin{equation}\int_a^{a+2h}f(x)dx=\int_{x_0-h}^{x_0+h}f(x)dx.\label{eq:hhint} \tag{2}\end{equation}$$ We have chosen a midpoint $x_0$ and have defined $x_0=a+h$. The rectangle methodA very simple approach is the so-called midpoint or rectangle method.In this case the integration area is split in a given number of rectangles with length $h$ and height given by the mid-point value of the function. This gives the following simple rule for approximating an integral $$\begin{equation}I=\int_a^bf(x) dx \approx h\sum_{i=1}^N f(x_{i-1/2}), \label{eq:rectangle} \tag{3}\end{equation}$$ where $f(x_{i-1/2})$ is the midpoint value of $f$ for a given rectangle. We will discuss its truncation error below. It is easy to implement this algorithm, as shown belowThe correct mathematical expression for the local error for the rectangular rule $R_i(h)$ for element $i$ is $$\int_{-h}^hf(x)dx - R_i(h)=-\frac{h^3}{24}f^{(2)}(\xi),$$ and the global error reads $$\int_a^bf(x)dx -R_h(f)=-\frac{b-a}{24}h^2f^{(2)}(\xi),$$ where $R_h$ is the result obtained with rectangular rule and $\xi \in [a,b]$.We go back to our simple example above and set $F_0=b=1$ and choose $x_0=0$ and $x=1/2$, and have $$W=\frac{1}{\pi}.$$ The code here computes the integral using the rectangle rule and $n=100$ integration points we have a relative error of$10^{-5}$.
###Code
from math import sin, pi
import numpy as np
from sympy import Symbol, integrate
# function for the Rectangular rule
def Rectangular(a,b,f,n):
h = (b-a)/float(n)
s = 0
for i in range(0,n,1):
        x = a + (i+0.5)*h
s = s+ f(x)
return h*s
# function to integrate
def function(x):
return sin(2*pi*x)
# define integration limits and integration points
a = 0.0; b = 0.5;
n = 100
Exact = 1./pi
print("Relative error= ", abs( (Rectangular(a,b,function,n)-Exact)/Exact))
###Output
Relative error= 4.112453549290521e-05
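###Markdown
A quick empirical check of the quoted $O(h^2)$ global error (a sketch reusing `Rectangular`, `function`, `a`, `b` and `Exact` from the cell above): doubling the number of points should reduce the error by roughly a factor of four.
###Code
# empirical convergence of the rectangle rule: the error should scale as h^2
for npts in [100, 200, 400, 800]:
    error = abs((Rectangular(a, b, function, npts) - Exact)/Exact)
    print(npts, error)
###Output
_____no_output_____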
###Markdown
The trapezoidal ruleThe other integral gives $$\int_{x_0-h}^{x_0}f(x)dx=\frac{h}{2}\left(f(x_0) + f(x_0-h)\right)+O(h^3),$$ and adding up we obtain $$\begin{equation} \int_{x_0-h}^{x_0+h}f(x)dx=\frac{h}{2}\left(f(x_0+h) + 2f(x_0) + f(x_0-h)\right)+O(h^3),\label{eq:trapez} \tag{4}\end{equation}$$ which is the well-known trapezoidal rule. Concerning the error in the approximation made,$O(h^3)=O((b-a)^3/N^3)$, you should note that this is the local error. Since we are splitting the integral from$a$ to $b$ in $N$ pieces, we will have to perform approximately $N$ such operations.This means that the *global error* goes like $\approx O(h^2)$. The trapezoidal reads then $$\begin{equation} I=\int_a^bf(x) dx=h\left(f(a)/2 + f(a+h) +f(a+2h)+ \dots +f(b-h)+ f_{b}/2\right),\label{eq:trapez1} \tag{5}\end{equation}$$ with a global error which goes like $O(h^2)$. Hereafter we use the shorthand notations $f_{-h}=f(x_0-h)$, $f_{0}=f(x_0)$and $f_{h}=f(x_0+h)$.The correct mathematical expression for the local error for the trapezoidal rule is $$\int_a^bf(x)dx -\frac{b-a}{2}\left[f(a)+f(b)\right]=-\frac{h^3}{12}f^{(2)}(\xi),$$ and the global error reads $$\int_a^bf(x)dx -T_h(f)=-\frac{b-a}{12}h^2f^{(2)}(\xi),$$ where $T_h$ is the trapezoidal result and $\xi \in [a,b]$.The trapezoidal rule is easy to implement numerically through the following simple algorithm * Choose the number of mesh points and fix the step length. * calculate $f(a)$ and $f(b)$ and multiply with $h/2$. * Perform a loop over $n=1$ to $n-1$ ($f(a)$ and $f(b)$ are known) and sum up the terms $f(a+h) +f(a+2h)+f(a+3h)+\dots +f(b-h)$. Each step in the loop corresponds to a given value $a+nh$. * Multiply the final result by $h$ and add $hf(a)/2$ and $hf(b)/2$.We use the same function and integrate now using the trapoezoidal rule.
###Code
import numpy as np
from sympy import Symbol, integrate
# function for the trapezoidal rule
def Trapez(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function to integrate
def function(x):
return sin(2*pi*x)
# define integration limits and integration points
a = 0.0; b = 0.5;
n = 100
Exact = 1./pi
print("Relative error= ", abs( (Trapez(a,b,function,n)-Exact)/Exact))
###Output
Relative error= 8.224805627923717e-05
###Markdown
Simpsons' ruleInstead of using the above first-order polynomials approximations for $f$, we attempt at using a second-order polynomials.In this case we need three points in order to define a second-order polynomial approximation $$f(x) \approx P_2(x)=a_0+a_1x+a_2x^2.$$ Using again Lagrange's interpolation formula we have $$P_2(x)=\frac{(x-x_0)(x-x_1)}{(x_2-x_0)(x_2-x_1)}y_2+ \frac{(x-x_0)(x-x_2)}{(x_1-x_0)(x_1-x_2)}y_1+ \frac{(x-x_1)(x-x_2)}{(x_0-x_1)(x_0-x_2)}y_0.$$ Inserting this formula in the integral of Eq. ([2](eq:hhint)) we obtain $$\int_{-h}^{+h}f(x)dx=\frac{h}{3}\left(f_h + 4f_0 + f_{-h}\right)+O(h^5),$$ which is Simpson's rule. Note that the improved accuracy in the evaluation ofthe derivatives gives a better error approximation, $O(h^5)$ vs.\ $O(h^3)$ .But this is again the *local error approximation*. Using Simpson's rule we can easily computethe integral of Eq. ([1](eq:integraldef)) to be $$\begin{equation} I=\int_a^bf(x) dx=\frac{h}{3}\left(f(a) + 4f(a+h) +2f(a+2h)+ \dots +4f(b-h)+ f_{b}\right),\label{eq:simpson} \tag{6}\end{equation}$$ with a global error which goes like $O(h^4)$. More formal expressions for the local and global errors are for the local error $$\int_a^bf(x)dx -\frac{b-a}{6}\left[f(a)+4f((a+b)/2)+f(b)\right]=-\frac{h^5}{90}f^{(4)}(\xi),$$ and for the global error $$\int_a^bf(x)dx -S_h(f)=-\frac{b-a}{180}h^4f^{(4)}(\xi).$$ with $\xi\in[a,b]$ and $S_h$ the results obtained with Simpson's method.The method can easily be implemented numerically through the following simple algorithm * Choose the number of mesh points and fix the step. * calculate $f(a)$ and $f(b)$ * Perform a loop over $n=1$ to $n-1$ ($f(a)$ and $f(b)$ are known) and sum up the terms $4f(a+h) +2f(a+2h)+4f(a+3h)+\dots +4f(b-h)$. Each step in the loop corresponds to a given value $a+nh$. Odd values of $n$ give $4$ as factor while even values yield $2$ as factor. * Multiply the final result by $\frac{h}{3}$.
###Code
from math import sin, pi
import numpy as np
from sympy import Symbol, integrate
# function for the trapezoidal rule
def Simpson(a,b,f,n):
h = (b-a)/float(n)
    sum = f(a)
for i in range(1,n):
sum = sum + f(a+i*h)*(3+(-1)**(i+1))
    sum = sum + f(b)
return sum*h/3.0
# function to integrate
def function(x):
return sin(2*pi*x)
# define integration limits and integration points
a = 0.0; b = 0.5;
n = 100
Exact = 1./pi
print("Relative error= ", abs( (Simpson(a,b,function,n)-Exact)/Exact))
###Output
Relative error= 5.412252157986472e-09
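###Markdown
A compact side-by-side comparison (a convenience cell, not part of the original exercise, reusing the `Rectangular`, `Trapez` and `Simpson` functions defined above for the same integral) makes the difference in convergence explicit.
###Code
# compare the three quadrature rules for the same integral
for npts in [10, 100, 1000]:
    print("n =", npts,
          " rectangle:", abs((Rectangular(a, b, function, npts) - Exact)/Exact),
          " trapezoid:", abs((Trapez(a, b, function, npts) - Exact)/Exact),
          " Simpson:", abs((Simpson(a, b, function, npts) - Exact)/Exact))
###Output
_____no_output_____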
###Markdown
We see that Simpson's rule gives a much better estimate of the relative error with the same number of points as we had for the rectangle rule and the trapezoidal rule. Symbolic integration. We could also use symbolic mathematics. Here Python comes to our rescue with [SymPy](https://www.sympy.org/en/index.html), which is a Python library for symbolic mathematics. Here's an example of how you could use **SymPy**, where we compare the symbolic calculation with a numerical integration of a function $f(x)$ by the trapezoidal rule. Here we show an example code that evaluates the integral $\int_0^1 x^2\,dx = 1/3$. The following code for the trapezoidal rule allows you to plot the relative error by comparing with the exact result. By increasing to $10^8$ points one arrives at a region where numerical errors start to accumulate.
###Code
%matplotlib inline
from math import log10
import numpy as np
from sympy import Symbol, integrate
import matplotlib.pyplot as plt
# function for the trapezoidal rule
def Trapez(a,b,f,n):
h = (b-a)/float(n)
s = 0
x = a
for i in range(1,n,1):
x = x+h
s = s+ f(x)
s = 0.5*(f(a)+f(b)) +s
return h*s
# function to compute pi
def function(x):
return x*x
# define integration limits
a = 0.0; b = 1.0;
# find result from sympy
# define x as a symbol to be used by sympy
x = Symbol('x')
exact = integrate(function(x), (x, a, b))
# set up the arrays for plotting the relative error
n = np.zeros(9); y = np.zeros(9);
# find the relative error as function of integration points
for i in range(1, 8, 1):
npts = 10**i
result = Trapez(a,b,function,npts)
RelativeError = abs((exact-result)/exact)
n[i] = log10(npts); y[i] = log10(RelativeError);
plt.plot(n,y, 'ro')
plt.xlabel('n')
plt.ylabel('Relative error')
plt.show()
###Output
_____no_output_____
###Markdown
Energy ConservationEnergy conservation is most convenient as a strategy for addressingproblems where time does not appear. For example, a particle goesfrom position $x_0$ with speed $v_0$, to position $x_f$; what is itsnew speed? However, it can also be applied to problems where timedoes appear, such as in solving for the trajectory $x(t)$, orequivalently $t(x)$.Energy is conserved in the case where the potential energy, $V(\boldsymbol{r})$, depends only on position, and not on time. The force is determined by $V$, $$\begin{equation}\boldsymbol{F}(\boldsymbol{r})=-\boldsymbol{\nabla} V(\boldsymbol{r}).\label{_auto1} \tag{7}\end{equation}$$ We say a force is conservative if it satisfies the following conditions:1. The force $\boldsymbol{F}$ acting on an object only depends on the position $\boldsymbol{r}$, that is $\boldsymbol{F}=\boldsymbol{F}(\boldsymbol{r})$.2. For any two points $\boldsymbol{r}_1$ and $\boldsymbol{r}_2$, the work done by the force $\boldsymbol{F}$ on the displacement between these two points is independent of the path taken.3. Finally, the **curl** of the force is zero $\boldsymbol{\nabla}\times\boldsymbol{F}=0$.The energy $E$ of a given system is defined as the sum of kinetic and potential energies, $$E=K+V(\boldsymbol{r}).$$ We define the potential energy at a point $\boldsymbol{r}$ as the negative work done from a starting point $\boldsymbol{r}_0$ to a final point $\boldsymbol{r}$ $$V(\boldsymbol{r})=-W(\boldsymbol{r}_0\rightarrow\boldsymbol{r})= -\int_{\boldsymbol{r}_0}^{\boldsymbol{r}}d\boldsymbol{r}'\boldsymbol{F}(\boldsymbol{r}').$$ If the potential depends on the path taken between these two points there is no unique potential.As an example, let us study a classical electron which moves in the $x$-direction along a surface. The force from the surface is $$\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_1.$$ The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.This is indeed a conservative force since it depends only on positionand its **curl** is zero, that is $-\boldsymbol{\nabla}\times \boldsymbol{F}=0$. This means that energy is conserved and theintegral over the work done by the force is independent of the pathtaken. Using the work-energy theorem we can find the work $W$ done whenmoving an electron from a position $x_0$ to a final position $x$through the integral $$W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',$$ which results in $$W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].$$ Since this is related to the change in kinetic energy we have, with $v_0$ being the initial velocity at a time $t_0$, $$v = \pm\sqrt{\frac{2}{m}\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+v_0^2}.$$ The potential energy, due to energy conservation is $$V(x)=V(x_0)+\frac{1}{2}mv_0^2-\frac{1}{2}mv^2,$$ with $v$ given by the velocity from above.We can now, in order to find a more explicit expression for thepotential energy at a given value $x$, define a zero level value forthe potential. The potential is defined, using the work-energytheorem, as $$V(x)=V(x_0)+\int_{x_0}^x (-F(x'))dx',$$ and if you recall the definition of the indefinite integral, we can rewrite this as $$V(x)=\int (-F(x'))dx'+C,$$ where $C$ is an undefined constant. The force is defined as thegradient of the potential, and in that case the undefined constantvanishes. 
The constant does not affect the force we derive from thepotential.We have then $$V(x)=V(x_0)-\int_{x_0}^x \boldsymbol{F}(x')dx',$$ which results in $$V(x)=-\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+V(x_0).$$ We can now define $$-\frac{F_0b}{2\pi}\cos{(\frac{2\pi x_0}{b})}=V(x_0),$$ which gives $$V(x)=-\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}\right].$$ We have defined work as the energy resulting from a net force actingon an object (or sseveral objects), that is $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})= \boldsymbol{F}(\boldsymbol{r})d\boldsymbol{r}.$$ If we write out this for each component we have $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=\boldsymbol{F}(\boldsymbol{r})d\boldsymbol{r}=F_xdx+F_ydy+F_zdz.$$ The work done from an initial position to a final one defines also the difference in potential energies $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=-\left[V(\boldsymbol{r}+d\boldsymbol{r})-V(\boldsymbol{r})\right].$$ We can write out the differences in potential energies as $$V(\boldsymbol{r}+d\boldsymbol{r})-V(\boldsymbol{r})=V(x+dx,y+dy,z+dz)-V(x,y,z)=dV,$$ and using the expression the differential of a multi-variable function $f(x,y,z)$ $$df=\frac{\partial f}{\partial x}dx+\frac{\partial f}{\partial y}dy+\frac{\partial f}{\partial z}dz,$$ we can write the expression for the work done as $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=-dV=-\left[\frac{\partial V}{\partial x}dx+\frac{\partial V}{\partial y}dy+\frac{\partial V}{\partial z}dz \right].$$ Comparing the last equation with $$W(\boldsymbol{r}\rightarrow \boldsymbol{r}+d\boldsymbol{r})=F_xdx+F_ydy+F_zdz,$$ we have $$F_xdx+F_ydy+F_zdz=-\left[\frac{\partial V}{\partial x}dx+\frac{\partial V}{\partial y}dy+\frac{\partial V}{\partial z}dz \right],$$ leading to $$F_x=-\frac{\partial V}{\partial x},$$ and $$F_y=-\frac{\partial V}{\partial y},$$ and $$F_z=-\frac{\partial V}{\partial z},$$ or just $$\boldsymbol{F}=-\frac{\partial V}{\partial x}\boldsymbol{e}_1-\frac{\partial V}{\partial y}\boldsymbol{e}_2-\frac{\partial V}{\partial z}\boldsymbol{e}_3=-\boldsymbol{\nabla}V(\boldsymbol{r}).$$ And this connection is the one we wanted to show. 
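As a small symbolic check of the relation just derived (a sketch with SymPy; the symbol names and the dummy integration variable are chosen here), we can recompute the potential for the surface force and verify that $-dV/dx$ gives back the force.
###Code
import sympy as sp
x, xp, F0, b = sp.symbols('x xp F_0 b', positive=True)
F = -F0*sp.sin(2*sp.pi*xp/b)        # the surface force from the example above
V = -sp.integrate(F, (xp, 0, x))    # potential as minus the work done from x0 = 0 to x
print(sp.simplify(V))               # equals -F_0*b*cos(2*pi*x/b)/(2*pi) up to an additive constant
print(sp.simplify(-sp.diff(V, x) - F.subs(xp, x)))  # prints 0, i.e. F = -dV/dx
###Output
_____no_output_____
###Markdown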
Net Energy
The net energy, $E=V+K$ where $K$ is the kinetic energy, is then conserved, $$\begin{eqnarray}\frac{d}{dt}(K+V)&=&\frac{d}{dt}\left(\frac{m}{2}(v_x^2+v_y^2+v_z^2)+V(\boldsymbol{r})\right)\\\nonumber&=&m\left(v_x\frac{dv_x}{dt}+v_y\frac{dv_y}{dt}+v_z\frac{dv_z}{dt}\right)+\partial_xV\frac{dx}{dt}+\partial_yV\frac{dy}{dt}+\partial_zV\frac{dz}{dt}\\\nonumber&=&v_xF_x+v_yF_y+v_zF_z-F_xv_x-F_yv_y-F_zv_z=0.\end{eqnarray}$$ The same proof can be written more compactly with vector notation, $$\begin{eqnarray}\frac{d}{dt}\left(\frac{m}{2}v^2+V(\boldsymbol{r})\right)&=&m\boldsymbol{v}\cdot\dot{\boldsymbol{v}}+\boldsymbol{\nabla} V(\boldsymbol{r})\cdot\dot{\boldsymbol{r}}\\\nonumber&=&\boldsymbol{v}\cdot\boldsymbol{F}-\boldsymbol{F}\cdot\boldsymbol{v}=0.\end{eqnarray}$$ Inverting the expression for kinetic energy, $$\begin{equation}v=\sqrt{2K/m}=\sqrt{2(E-V)/m},\label{_auto2} \tag{8}\end{equation}$$ allows one to solve for the one-dimensional trajectory $x(t)$, by finding $t(x)$, $$\begin{equation}t=\int_{x_0}^x \frac{dx'}{v(x')}=\int_{x_0}^x\frac{dx'}{\sqrt{2(E-V(x'))/m}}.\label{_auto3} \tag{9}\end{equation}$$ Note this would be much more difficult in higher dimensions, because you would have to determine which points $x,y,z$ the particle might reach in the trajectory, whereas in one dimension you can typically tell by simply checking whether the kinetic energy is positive at every point between the old position and the new position.
The Earth-Sun system
We will now venture into the study of a system which is energy conserving. The aim is to see if (since it is not possible to solve the general equations analytically) we can develop stable numerical algorithms whose results we can trust! We solve the equations of motion numerically. We will also compute quantities like the energy numerically. We start with a simpler case first, the Earth-Sun system in two dimensions only. The gravitational force $F_G$ on the earth from the sun is $$\boldsymbol{F}_G=-\frac{GM_{\odot}M_E}{r^3}\boldsymbol{r},$$ where $G$ is the gravitational constant, $$M_E=6\times 10^{24}\mathrm{kg},$$ the mass of Earth, $$M_{\odot}=2\times 10^{30}\mathrm{kg},$$ the mass of the Sun and $$r=1.5\times 10^{11}\mathrm{m},$$ is the distance between Earth and the Sun. The latter defines what we call an astronomical unit **AU**. From Newton's second law we have then for the $x$ direction $$\frac{d^2x}{dt^2}=\frac{F_{x}}{M_E},$$ and $$\frac{d^2y}{dt^2}=\frac{F_{y}}{M_E},$$ for the $y$ direction. Here we will use that $x=r\cos{(\theta)}$, $y=r\sin{(\theta)}$ and $$r = \sqrt{x^2+y^2}.$$ We can rewrite the force components as $$F_{x}=-\frac{GM_{\odot}M_E}{r^2}\cos{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}x,$$ and $$F_{y}=-\frac{GM_{\odot}M_E}{r^2}\sin{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}y.$$ These two equations can be rewritten as four first-order coupled differential equations $$\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,$$ $$\frac{dx}{dt}=v_x,$$ $$\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,$$ and $$\frac{dy}{dt}=v_y.$$ The four coupled differential equations can be turned into dimensionless equations, or we can introduce astronomical units with $1$ AU $= 1.5\times 10^{11}$ m.
Using the equations from circular motion (with $r =1\mathrm{AU}$) $$\frac{M_E v^2}{r} = F = \frac{GM_{\odot}M_E}{r^2},$$ we have $$GM_{\odot}=v^2r,$$ and using that the velocity of Earth (assuming circular motion) is $v = 2\pi r/\mathrm{yr}=2\pi\mathrm{AU}/\mathrm{yr}$, we have $$GM_{\odot}= v^2r = 4\pi^2 \frac{(\mathrm{AU})^3}{\mathrm{yr}^2}.$$ The four coupled differential equations can then be discretized using Euler's method as (with step length $h$) $$v_{x,i+1}=v_{x,i}-h\frac{4\pi^2}{r_i^3}x_i,$$ $$x_{i+1}=x_i+hv_{x,i},$$ $$v_{y,i+1}=v_{y,i}-h\frac{4\pi^2}{r_i^3}y_i,$$ and $$y_{i+1}=y_i+hv_{y,i}.$$ The code here implements Euler's method for the Earth-Sun system using a more compact way of representing the vectors. Alternatively, you could have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ as one-dimensional arrays.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
DeltaT = 0.001
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
r[i+1] = r[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('y[AU]')
ax.set_xlabel('x[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
###Output
_____no_output_____
###Markdown
We notice here that Euler's method doesn't give a stable orbit. It means that we cannot trust Euler's method. In a deeper way, as we will see in homework 5, Euler's method does not conserve energy. It is an example of an integrator which is not [symplectic](https://en.wikipedia.org/wiki/Symplectic_integrator). Here we thus present two methods which, with simple changes, allow us to avoid these pitfalls. The simplest possible extension is the so-called Euler-Cromer method. The changes we need to make to our code are indeed marginal here. We simply need to replace
###Code
r[i+1] = r[i] + DeltaT*v[i]
###Output
_____no_output_____
###Markdown
in the above code with the velocity at the new time $t_{i+1}$
###Code
r[i+1] = r[i] + DeltaT*v[i+1]
###Output
_____no_output_____
###Markdown
By this simple caveat we get stable orbits.Below we derive the Euler-Cromer method as well as one of the most utlized algorithms for sovling the above type of problems, the so-called Velocity-Verlet method. Let us repeat Euler's method.We have a differential equation $$\begin{equation}y'(t_i)=f(t_i,y_i) \label{_auto4} \tag{10}\end{equation}$$ and if we truncate at the first derivative, we have from the Taylor expansion $$\begin{equation}y_{i+1}=y(t_i) + (\Delta t) f(t_i,y_i) + O(\Delta t^2), \label{eq:euler} \tag{11}\end{equation}$$ which when complemented with $t_{i+1}=t_i+\Delta t$ formsthe algorithm for the well-known Euler method. Note that at every step we make an approximation errorof the order of $O(\Delta t^2)$, however the total error is the sum over allsteps $N=(b-a)/(\Delta t)$ for $t\in [a,b]$, yielding thus a global error which goes like$NO(\Delta t^2)\approx O(\Delta t)$. To make Euler's method more precise we can obviouslydecrease $\Delta t$ (increase $N$), but this can lead to loss of numerical precision.Euler's method is not recommended for precision calculation,although it is handy to use in order to get a firstview on how a solution may look like.Euler's method is asymmetric in time, since it uses information about the derivative at the beginningof the time interval. This means that we evaluate the position at $y_1$ using the velocityat $v_0$. A simple variation is to determine $x_{n+1}$ using the velocity at$v_{n+1}$, that is (in a slightly more generalized form) $$\begin{equation} y_{n+1}=y_{n}+ v_{n+1}+O(\Delta t^2)\label{_auto5} \tag{12}\end{equation}$$ and $$\begin{equation}v_{n+1}=v_{n}+(\Delta t) a_{n}+O(\Delta t^2).\label{_auto6} \tag{13}\end{equation}$$ The acceleration $a_n$ is a function of $a_n(y_n, v_n, t_n)$ and needs to be evaluatedas well. This is the Euler-Cromer method. Deriving the Velocity-Verlet MethodLet us stay with $x$ (position) and $v$ (velocity) as the quantities we are interested in.We have the Taylor expansion for the position given by $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_i+O((\Delta t)^3).$$ The corresponding expansion for the velocity is $$v_{i+1} = v_i+(\Delta t)a_i+\frac{(\Delta t)^2}{2}v^{(2)}_i+O((\Delta t)^3).$$ Via Newton's second law we have normally an analytical expression for the derivative of the velocity, namely $$a_i= \frac{d^2 x}{dt^2}\vert_{i}=\frac{d v}{dt}\vert_{i}= \frac{F(x_i,v_i,t_i)}{m}.$$ If we add to this the corresponding expansion for the derivative of the velocity $$v^{(1)}_{i+1} = a_{i+1}= a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2)=a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2),$$ and retain only terms up to the second derivative of the velocity since our error goes as $O(h^3)$, we have $$(\Delta t)v^{(2)}_i\approx a_{i+1}-a_i.$$ We can then rewrite the Taylor expansion for the velocity as $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left( a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Our final equations for the position and the velocity become then $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),$$ and $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Note well that the term $a_{i+1}$ depends on the position at $x_{i+1}$. This means that you need to calculate the position at the updated time $t_{i+1}$ before the computing the next velocity. Note also that the derivative of the velocity at the time$t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well. 
We can now easily add the Verlet method to our original code as
###Code
DeltaT = 0.01
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
    # Set up the acceleration from the gravitational force; we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('y[AU]')
ax.set_xlabel('x[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunVV")
plt.show()
###Output
_____no_output_____
###Markdown
You can easily generalize the calculation of the forces by defining a functionwhich takes in as input the various variables. We leave this as a challenge to you. Additional Material: Link between Line Integrals and Conservative forcesThe concept of line integrals plays an important role in our discussion of energy conservation,our definition of potentials and conservative forces.Let us remind ourselves of some the basic elements (most of you mayhave seen this in a calculus course under the general topic of vectorfields).We define a path integration $C$, that is we integratefrom a point $\boldsymbol{r}_1$ to a point $\boldsymbol{r}_2$. Let us assume that the path $C$ is represented by an arc length $s$. In three dimension we have the following representation of $C$ $$\boldsymbol{r}(s)=x(s)\boldsymbol{e}_1+y(s)\boldsymbol{e}_2+z(s)\boldsymbol{e}_3,$$ then our integral of a function $f(x,y,z)$ along the path $C$ is defined as $$\int_Cf(x,y,z)ds=\int_a^bf\left(x(s),y(s),z(s)\right)ds,$$ where the initial and final points are $a$ and $b$, respectively.With the definition of a line integral, we can in tunrn set up thetheorem of independence of integration path.Let us define$f(x,y,z)$, $g(x,y,z)$ and $h(x,y,z)$ to be functions which aredefined and continuous in a domain $D$ in space. Then a line integrallike the above is said to be independent of path in $D$, if for everypair of endpoints $a$ and $b$ in $D$ the value of the integral is thesame for all paths $C$ in $D$ starting from a point $a$ and ending ina point $b$. The integral depends thus only on the integration limitsand not on the path.An expression of the form $$fdx+gdy+hdz,$$ where $f$, $g$ and $h$ are functions defined in $D$, is a called a first-order differential formin three variables.The form is said to be exact if it is the differential $$du= \frac{\partial u}{\partial x}dx+\frac{\partial u}{\partial y}dy+\frac{\partial u}{\partial z}dz,$$ of a differentiable function $u(x,y,z)$ everywhere in $D$, that is $$du=fdx+gdy+hdz.$$ It is said to be exact if and only if we can then set $$f=\frac{\partial u}{\partial x},$$ and $$g=\frac{\partial u}{\partial y},$$ and $$h=\frac{\partial u}{\partial z},$$ everywhere in the domain $D$.In vector language the above means that the differential form $$fdx+gdy+hdz,$$ is exact in $D$ if and only if the vector function (it could be a force, or velocity, acceleration or other vectors we encounter in this course) $$\boldsymbol{F}=f\boldsymbol{e}_1+g\boldsymbol{e}_2+h\boldsymbol{e}_3,$$ is the gradient of a function $u(x,y,z)$ $$\boldsymbol{v}=\boldsymbol{\nabla}u=\frac{\partial u}{\partial x}\boldsymbol{e}_1+\frac{\partial u}{\partial y}\boldsymbol{e}_2+\frac{\partial u}{\partial z}\boldsymbol{e}_3.$$ If this is the case, we can state the path independence theorem whichstates that with functions $f(x,y,z)$, $g(x,y,z)$ and $h(x,y,z)$ that fulfill the aboveexactness conditions, the line integral $$\int_C\left(fdx+gdy+hdz\right),$$ is independent of path in $D$ if and only if the differential form under the integral sign is exact in $D$.This is the path independence theorem. We will not give a proof of the theorem. 
You can find this in any vector analysis chapter in a mathematics textbook.We note however that the path integral from a point $p$ to a final point $q$ is given by $$\int_p^q\left(fdx+gdy+hdz\right)=\int_p^q\left(\frac{\partial u}{\partial x}dx+\frac{\partial u}{\partial y}dy+\frac{\partial u}{\partial z}dz\right)=\int_p^qdu.$$ Assume now that we have a dependence on a variable $s$ for $x$, $y$ and $z$. We have then $$\int_p^qdu=\int_{s_1}^{s_2}\frac{du}{ds}ds = u(x(s),y(s),z(s))\vert_{s=s_1}^{s=s_2}=u(q)-u(p).$$ This last equation $$\int_p^q\left(fdx+gdy+hdz\right)=u(q)-u(p),$$ is the analogue of the usual formula $$\int_a^bf(x)dx=F(x)\vert_a^b=F(b)-F(a),$$ with $F'(x)=f(x)$.We remember that a the work done by a force$\boldsymbol{F}=f\boldsymbol{e}_1+g\boldsymbol{e}_2+h\boldsymbol{e}_3$ on a displacemnt $d\boldsymbol{r}$is $$W=\int_C\boldsymbol{F}d\boldsymbol{r}=\int_C(fdx+gdy+hdz).$$ From the path independence theorem, we know that this has to result inthe difference between the two endpoints only. This is exact if andonly if the force is the force $\boldsymbol{F}$ is the gradient of a scalarfunction $u$. We call this scalar function, which depends only thepositions $x,y,z$ for the potential energy $V(x,y,z)=V(\boldsymbol{r})$.We have thus $$\boldsymbol{F}(\boldsymbol{r})\propto \boldsymbol{\nabla}V(\boldsymbol{r}),$$ and we define this as $$\boldsymbol{F}(\boldsymbol{r})= -\boldsymbol{\nabla}V(\boldsymbol{r}).$$ Such a force is called **a conservative force**. The above expression can be used to demonstrateenergy conservation.Finally we can define the criterion for exactness and independence ofpath. This theorem states that if $f(x,y,z)$, $g(x,y,z)$ and$h(x,y,z)$ are continuous functions with continuous first partial derivatives in the domain $D$,then the line integral $$\int_C\left(fdx+gdy+hdz\right),$$ is independent of path in $D$ when $$\frac{\partial h}{\partial y}=\frac{\partial g}{\partial z},$$ and $$\frac{\partial f}{\partial z}=\frac{\partial h}{\partial x},$$ and $$\frac{\partial g}{\partial x}=\frac{\partial f}{\partial y}.$$ This leads to the **curl** of $\boldsymbol{F}$ being zero $$\boldsymbol{\nabla}\times\boldsymbol{F}=\boldsymbol{\nabla}\times\left(-\boldsymbol{\nabla}V(\boldsymbol{r})\right)=0!$$ A conservative force $\boldsymbol{F}$ is a defined as the partial derivative of a scalar potential which depends only on the position, $$\boldsymbol{F}(\boldsymbol{r})= -\boldsymbol{\nabla}V(\boldsymbol{r}).$$ This leads to conservation of energy and a path independent line integral as long as the curl of the force is zero, that is $$\boldsymbol{\nabla}\times\boldsymbol{F}=\boldsymbol{\nabla}\times\left(-\boldsymbol{\nabla}V(\boldsymbol{r})\right)=0.$$ Exercises Exercise: Conservation laws, Energy and momentumHow do we define a conservative force?A conservative force is a force whose property is that the total workdone in moving an object between two points is independent of thetaken path. This means that the work on an object under the influenceof a conservative force, is independent on the path of the object. Itdepends only on the spatial degrees of freedom and it is possible toassign a numerical value for the potential at any point. It leads toconservation of energy. 
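As a quick illustration of this criterion, the small sketch below (an addition, assuming the sympy package is available and using an arbitrary sample potential rather than any potential from the text) builds a force as the negative gradient of a scalar function and checks symbolically that its curl vanishes.
###Code
import sympy as sp

x, y, z = sp.symbols('x y z', real=True)
# arbitrary sample potential, chosen only for illustration
V = x**2*y + sp.sin(z)*x
# conservative force as the negative gradient of V
Fx, Fy, Fz = -sp.diff(V, x), -sp.diff(V, y), -sp.diff(V, z)
# components of the curl of F; they should all simplify to zero
curl = (sp.diff(Fz, y) - sp.diff(Fy, z),
        sp.diff(Fx, z) - sp.diff(Fz, x),
        sp.diff(Fy, x) - sp.diff(Fx, y))
print([sp.simplify(c) for c in curl])
###Output
_____no_output_____
###Markdown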
The gravitational force is an example of aconservative force.If you wish to read more about conservative forces or not, Feyman's lectures from 1963 are quite interesting.He states for example that **All fundamental forces in nature appear to be conservative**.This statement was made while developing his argument that *there are no nonconservative forces*.You may enjoy the link to [Feynman's lecture](http://www.feynmanlectures.caltech.edu/I_14.html).An important condition for the final work to be independent of the path is that the **curl** of the force is zero, thatis $$\boldsymbol{\nabla} \times \boldsymbol{F}=0$$ Use the work-energy theorem to show that energy is conserved with a conservative force.The work-energy theorem states that the work done $W$ by a force $\boldsymbol{F}$ that moves an object from a position $\boldsymbol{r}_0$ to a new position $\boldsymbol{r}_1$ $$W=\int_{\boldsymbol{r}_0}^{\boldsymbol{r}_1}\boldsymbol{F}\boldsymbol{dr}=\frac{1}{2}mv_1^2-\frac{1}{2}mv_0^2,$$ where $v_1^2$ is the velocity squared at a time $t_1$ and $v_0^2$ the corresponding quantity at a time $t_0$.The work done is thus the difference in kinetic energies. We can rewrite the above equation as $$\frac{1}{2}mv_1^2=\int_{\boldsymbol{r}_0}^{\boldsymbol{r}_1}\boldsymbol{F}\boldsymbol{dr}+\frac{1}{2}mv_0^2,$$ that is the final kinetic energy is equal to the initial kinetic energy plus the work done by the force over a given path from a position $\boldsymbol{r}_0$ at time $t_0$ to a final position position $\boldsymbol{r}_1$ at a later time $t_1$.Assume that you have only internal two-body forces acting on $N$ objects in an isolated system. The force from object $i$ on object $j$ is $\boldsymbol{f}_{ij}$. Show that the linear momentum is conserved.Here we use Newton's third law and assume that our system is onlyaffected by so-called internal forces. This means that the force$\boldsymbol{f}_{ij}$ from object $i$ acting on object $j$ is equal to theforce acting on object $j$ from object $i$ but with opposite sign,that is $\boldsymbol{f}_{ij}=-\boldsymbol{f}_{ji}$.The total linear momentum is defined as $$\boldsymbol{P}=\sum_{i=1}^N\boldsymbol{p}_i=\sum_{i=1}^Nm_i\boldsymbol{v}_i,$$ where $i$ runs over all objects, $m_i$ is the mass of object $i$ and $\boldsymbol{v}_i$ its corresponding velocity.The force acting on object $i$ from all the other objects is (lowercase letters for individual objects and upper case letters for totalquantities) $$\boldsymbol{f}_i=\sum_{j=1}^N\boldsymbol{f}_{ji}.$$ Summing over all objects the net force is $$\sum_{i=1}^N\boldsymbol{f}_i=\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}.$$ We are summing freely over all objects with the constraint that $i\ne j$ (no self-interactions). We can now manipulate the double sum as $$\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{f}_{ji}+\boldsymbol{f}_{ij}).$$ Convince yourself about this by setting $N=2$ and $N=3$. 
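As a small numerical sanity check of the regrouping above (an addition using numpy with randomly generated, purely hypothetical pair forces), the sketch below verifies the identity between the two double sums and shows that the net internal force vanishes once Newton's third law is imposed.
###Code
import numpy as np

rng = np.random.default_rng(1)
N = 3
# hypothetical pair forces: f[i, j] is the (3d) force from object i on object j
f = rng.normal(size=(N, N, 3))
for i in range(N):
    f[i, i] = 0.0  # no self-interactions
# regrouping identity: sum_i sum_{j != i} f_ji  ==  sum_i sum_{j > i} (f_ji + f_ij)
lhs = sum(f[j, i] for i in range(N) for j in range(N) if j != i)
rhs = sum(f[j, i] + f[i, j] for i in range(N) for j in range(i + 1, N))
print(np.allclose(lhs, rhs))
# impose Newton's third law, f_ij = -f_ji: the net internal force is then zero
f = f - np.transpose(f, (1, 0, 2))
net = sum(f[j, i] for i in range(N) for j in range(N) if j != i)
print(np.allclose(net, 0.0))
###Output
_____no_output_____
###Markdown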
Newton's third law says $\boldsymbol{f}_{ij}=-\boldsymbol{f}_{ji}$, which means we have $$\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{f}_{ji}-\boldsymbol{f}_{ji})=0.$$ The total force due to internal degrees of freedom only is thus $0$. If we then use the definition that $$\sum_{i=1}^N\boldsymbol{f}_i=\sum_{i=1}^Nm_i\frac{d\boldsymbol{v}_i}{dt}=\sum_{i=1}^N\frac{d\boldsymbol{p}_i}{dt}=\frac{d \boldsymbol{P}}{dt}=0,$$ where we assumed that $m_i$ is independent of time, we see that the time derivative of the total momentum is zero. We say then that the linear momentum is a constant of the motion. It is conserved. Exercise: Conservation of angular momentum * Define angular momentum and the torque for a single object with external forces only. The angular momentum $\boldsymbol{l}_i$ for a given object $i$ is defined as $$\boldsymbol{l}_i = \boldsymbol{r}_i \times \boldsymbol{p}_i,$$ where $\boldsymbol{p}_i=m_i\boldsymbol{v}_i$. With external forces only defining the acceleration and the mass being time independent, the momentum is the integral over the external force as function of time, that is $$\boldsymbol{p}_i(t)=\boldsymbol{p}_i(t_0)+\int_{t_0}^t \boldsymbol{f}_i^{\mathrm{ext}}(t')dt'.$$ The torque for one object is $$\boldsymbol{\tau}_i=\frac{d\boldsymbol{l}_i}{dt} = \frac{d(\boldsymbol{r}_i \times \boldsymbol{p}_i)}{dt}=\boldsymbol{r}_i \times \frac{d\boldsymbol{p}_i}{dt}=\boldsymbol{r}_i \times \boldsymbol{f}_i.$$ * Define angular momentum and the torque for a system with $N$ objects/particles with external and internal forces. The force from object $i$ on object $j$ is $\boldsymbol{F}_{ij}$. The total angular momentum $\boldsymbol{L}$ is defined as $$\boldsymbol{L}=\sum_{i=1}^N\boldsymbol{l}_i = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{p}_i,$$ and the total torque is (using the expression for one object from 2a) $$\boldsymbol{\tau}=\sum_{i=1}^N\frac{d\boldsymbol{l}_i}{dt} = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{f}_i.$$ The force acting on one object is $\boldsymbol{f}_i=\boldsymbol{f}_i^{\mathrm{ext}}+\sum_{j=1}^N\boldsymbol{f}_{ji}$. * With internal forces only, what is the mathematical form of the forces that allows for angular momentum to be conserved? Using the results from 1c, we can rewrite without external forces our torque as $$\boldsymbol{\tau}=\sum_{i=1}^N\frac{d\boldsymbol{l}_i}{dt} = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{f}_i=\sum_{i=1}^N(\boldsymbol{r}_i \times \sum_{j=1}^N\boldsymbol{f}_{ji}),$$ which gives $$\boldsymbol{\tau}=\sum_{i=1}^N\sum_{j=1;j\ne i}^N(\boldsymbol{r}_i \times \boldsymbol{f}_{ji}).$$ We can rewrite this as (convince yourself again about this) $$\boldsymbol{\tau}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{r}_i \times \boldsymbol{f}_{ji}+\boldsymbol{r}_j \times \boldsymbol{f}_{ij}),$$ and using Newton's third law we have $$\boldsymbol{\tau}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{r}_i -\boldsymbol{r}_j) \times \boldsymbol{f}_{ji}.$$ If the force is proportional to $\boldsymbol{r}_i -\boldsymbol{r}_j$ then angular momentum is conserved since the cross-product of a vector with itself is zero. We say thus that angular momentum is a constant of the motion. Exercise: Example of potential Consider a particle of mass $m$ moving according to the potential $$V(x,y,z)=A\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ * Is energy conserved? If so, why? In this exercise $A$ and $a$ are constants. 
The force is given by the derivative of $V$ with respect to the spatial degrees of freedom and since the potential depends only on position, the force is conservative and energy is conserved. Furthermore, the curl of the force is zero. To see this we need first to compute the derivatives of the potential with respect to $x$, $y$ and $z$.We have that $$F_x = -\frac{\partial V}{\partial x}=-\frac{xA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\},$$ and $$F_y = 0,$$ and $$F_z = -\frac{\partial V}{\partial z}=-\frac{zA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ The components of the **curl** of $\boldsymbol{F}$ are $$(\boldsymbol{\nabla}\times\boldsymbol{F})_x = \frac{\partial F_y}{\partial z}-\frac{\partial F_z}{\partial y}=0,$$ and $$(\boldsymbol{\nabla}\times\boldsymbol{F})_y = \frac{\partial F_x}{\partial z}-\frac{\partial F_z}{\partial x}=\frac{xzA}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{xzA}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}=0,$$ and $$(\boldsymbol{\nabla}\times\boldsymbol{F})_z = \frac{\partial F_y}{\partial x}-\frac{\partial F_x}{\partial y}=0.$$ The force is a conservative one.* Which of the quantities, $p_x,p_y,p_z$ are conserved?Taking the derivatives with respect to time shows that only $p_y$ is conservedWe see this directly from the above expressions for the force, since the derivative with respect to time of the momentum is simply the force. Thus, only the $y$-component of the momentum is conserved, see the expressions above for the forces,For the next exercise, we also need the following derivatives $$\frac{\partial F_x}{\partial x} = \frac{x^2A}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{A}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\},$$ and $$\frac{\partial F_y}{\partial y} = 0,$$ and $$\frac{\partial F_z}{\partial z} = \frac{z^2A}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{A}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\},$$ * Which of the quantities, $L_x,L_y,L_z$ are conserved?Using that $\boldsymbol{L}=\boldsymbol{r}\times\boldsymbol{p}$ and that $$\frac{d\boldsymbol{L}}{dt}=\boldsymbol{r}\times\boldsymbol{F},$$ we have that the different components are $$(\boldsymbol{r}\times\boldsymbol{F})_x = zF_y-yF_z=\frac{yzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ and $$(\boldsymbol{r}\times\boldsymbol{F})_y = xF_z-zF_x=-\frac{xzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}+\frac{xzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}=0,$$ and $$(\boldsymbol{r}\times\boldsymbol{F})_z = xF_y-yF_x=\frac{yxA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}.$$ Only $L_y$ is conserved. Exercise: forces and potentialsA particle of mass $m$ has velocity $v=\alpha/x$, where $x$ is its displacement.* Find the force $F(x)$ responsible for the motion.Here, since the force is assumed to be conservative (only dependence on $x$), we can use energy conservation.Assuming that the total energy at $t=0$ is $E_0$, we have $$E_0=V(x)+\frac{1}{2}mv^2=V(x)+\frac{1}{2}m\frac{\alpha^2}{x^2}.$$ Taking the derivative wrt $x$ we have $$\frac{dV}{dx}-m\frac{\alpha^2}{x^3}=0,$$ and since $F(x)=-dV/dx$ we have $$F(x)=-m\frac{\alpha^2}{x^3}.$$ A particle is thereafter under the influence of a force $F=-kx+kx^3/\alpha^2$, where $k$ and $\alpha$ are constants and $k$ is positive.* Determine $V(x)$ and discuss the motion. It can be convenient here to make a sketch/plot of the potential as function of $x$.We assume that the potential is zero at say $x=0$. 
Integrating the force from zero to $x$ gives $$V(x) = -\int_0^x F(x')dx'=\frac{kx^2}{2}-\frac{kx^4}{4\alpha^2}.$$ The following code plots the potential. We have chosen values of $\alpha=k=1.0$. Feel free to experiment with other values. We plot $V(x)$ for a domain of $x\in [-2,2]$.
###Code
import numpy as np
import matplotlib.pyplot as plt
import math
x0= -2.0
xn = 2.1
Deltax = 0.1
alpha = 1.0
k = 1.0
#set up arrays
x = np.arange(x0,xn,Deltax)
n = np.size(x)
V = np.zeros(n)
V = 0.5*k*x*x-0.25*k*(x**4)/(alpha*alpha)
plt.plot(x, V)
plt.xlabel("x")
plt.ylabel("V")
plt.show()
###Output
_____no_output_____
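###Markdown
As a quick numerical cross-check (an addition that reuses the arrays x and V and the constants k and alpha from the cell above), we can locate the top of the barrier in the plotted potential and compare it with the analytical value $k\alpha^2/4$ discussed below.
###Code
# locate the barrier top of the plotted potential numerically
imax = np.argmax(V)
print(x[imax], V[imax], 0.25*k*alpha**2)
###Output
_____no_output_____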
###Markdown
From the plot here (with the chosen parameters) 1. we see that with a given initial velocity we can overcome the potential energy barrierand leave the potential well for good.1. If the initial velocity is smaller (see next exercise) than a certain value, it will remain trapped in the potential well and oscillate back and forth around $x=0$. This is where the potential has its minimum value. 2. If the kinetic energy at $x=0$ equals the maximum potential energy, the object will oscillate back and forth between the minimum potential energy at $x=0$ and the turning points where the kinetic energy turns zero. These are the so-called non-equilibrium points. * What happens when the energy of the particle is $E=(1/4)k\alpha^2$? Hint: what is the maximum value of the potential energy?From the figure we see thatthe potential has a minimum at at $x=0$ then rises until $x=\alpha$ before falling off again. The maximumpotential, $V(x\pm \alpha) = k\alpha^2/4$. If the energy is higher, the particle cannot be contained in thewell. The turning points are thus defined by $x=\pm \alpha$. And from the previous plot you can easily see that this is the case ($\alpha=1$ in the abovementioned Python code). Exercise: Work-energy theorem and conservation lawsThis exercise was partly discussed above. We will study a classical electron which moves in the $x$-direction along a surface. The force from the surface is $$\boldsymbol{F}(x)=-F_0\sin{(\frac{2\pi x}{b})}\boldsymbol{e}_x.$$ The constant $b$ represents the distance between atoms at the surface of the material, $F_0$ is a constant and $x$ is the position of the electron.* Is this a conservative force? And if so, what does that imply?This is indeed a conservative force since it depends only on position and its **curl** is zero. This means that energy is conserved and the integral over the work done by the force is independent of the path taken. * Use the work-energy theorem to find the velocity $v(x)$. Using the work-energy theorem we can find the work $W$ done when moving an electron from a position $x_\0$ to a final position $x$ through the integral $$W=\int_{x_0}^x \boldsymbol{F}(x')dx' = -\int_{x_0}^x F_0\sin{(\frac{2\pi x'}{b})} dx',$$ which results in $$W=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right].$$ Since this is related to the change in kinetic energy we have, with $v_0$ being the initial velocity at a time $t_0$, $$v = \pm\sqrt{\frac{2}{m}\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+v_0^2}.$$ * With the above expression for the force, find the potential energy.The potential energy, due to energy conservation is $$V(x)=V(x_0)+\frac{1}{2}mv_0^2-\frac{1}{2}mv^2,$$ with $v$ given by the previous answer. We can now, in order to find a more explicit expression for the potential energy at a given value $x$, define a zero level value for the potential. The potential is defined , using the work-energy theorem , as $$V(x)=V(x_0)+\int_{x_0}^x (-F(x'))dx',$$ and if you recall the definition of the indefinite integral, we can rewrite this as $$V(x)=\int (-F(x'))dx'+C,$$ where $C$ is an undefined constant. The force is defined as the gradient of the potential, and in that case the undefined constant vanishes. 
The constant does not affect the force we derive from the potential.We have then $$V(x)=V(x_0)-\int_{x_0}^x \boldsymbol{F}(x')dx',$$ which results in $$V(x)=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}-\cos{(\frac{2\pi x_0}{b})}\right]+V(x_0).$$ We can now define $$\frac{F_0b}{2\pi}\cos{(\frac{2\pi x_0}{b})}=V(x_0),$$ which gives $$V(x)=\frac{F_0b}{2\pi}\left[\cos{(\frac{2\pi x}{b})}\right].$$ * Make a plot of the potential energy and discuss the equilibrium points where the force on the electron is zero. Discuss the physical interpretation of stable and unstable equilibrium points. Use energy conservation. The following Python code plots the potential
###Code
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
Deltax = 0.01
#set up arrays
xinitial = -2.0
xfinal = 2.0
n = ceil((xfinal-xinitial)/Deltax)
x = np.zeros(n)
for i in range(n):
x[i] = xinitial+i*Deltax
V = np.zeros(n)
# Setting values for the constants.
F0 = 1.0; b = 1.0;
# Defining the potential
V = F0*b/(2*pi)*np.cos(2*pi*x/b)
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_ylabel('V')
ax.set_xlabel('x')
ax.plot(x, V)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
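###Markdown
The following small addition (reusing the grid x, the potential array V and the number of points n from the cell above) locates the interior minima and maxima of the plotted potential numerically; these are the stable and unstable equilibrium points discussed next.
###Code
# interior local minima (stable equilibria) and maxima (unstable equilibria) of V
stable = [round(x[i], 2) for i in range(1, n-1) if V[i] < V[i-1] and V[i] < V[i+1]]
unstable = [round(x[i], 2) for i in range(1, n-1) if V[i] > V[i-1] and V[i] > V[i+1]]
print("stable (minima):  ", stable)
print("unstable (maxima):", unstable)
###Output
_____no_output_____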
###Markdown
We have stable equilibrium points for every minimum of the $\cos$ function and unstable equilibrium points where it has its maximimum values. At the minimum the particle has the lowest potential energy and the largest kinetic energy whereas at the maxima it has the largest potential energy and lowest kinetic energy. Exsercise: Rocket, Momentum and massTaylor exercise 3.11. Consider the rocket of mass $M$ moving with velocity $v$. After abrief instant, the velocity of the rocket is $v+\Delta v$ and the massis $M-\Delta M$. Momentum conservation gives $$\begin{eqnarray*}Mv&=&(M-\Delta M)(v+\Delta v)+\Delta M(v-v_e)\\0&=&-\Delta Mv+M\Delta v+\Delta M(v-v_e),\\0&=&M\Delta v-\Delta Mv_e.\end{eqnarray*}$$ In the second step we ignored the term $\Delta M\Delta v$ since weassume it is small. The last equation gives $$\begin{eqnarray}\Delta v&=&\frac{v_e}{M}\Delta M,\\\nonumber\frac{dv}{dt}&=&\frac{v_e}{M}\frac{dM}{dt}.\end{eqnarray}$$ Here we let $\Delta v\rightarrow dv$ and $\Delta M\rightarrow dM$.We have also assumed that $M(t) = M_0-kt$. Integrating the expression with lower limits $v_0=0$ and $M_0$, one finds $$\begin{eqnarray*}v&=&v_e\int_{M_0}^M \frac{dM'}{M'}\\v&=&v_e\ln(M/M_0)\\&=&v_e\ln[(M_0-k t)/M_0].\end{eqnarray*}$$ We have ignored gravity here. If we add gravity as the external force, we get when integrating an additional terms $-gt$, that is $$v=v_e\ln[(M_0-k t)/M_0]-gt.$$ Inserting numbers $v_e=3000$ m/s, $M_0/M=2$ and $g=9.8$ m/s$^{2}$, we find $v=900$ m/s. With $g=0$ the corresponding number is $2100$ m/s, so gravity reduces the speed acquired in the first two minutes to a little less than half its weight-free value.If the thrust $\Delta Mv_e$ is less than the weight $mg$, the rocket will just sit on the ground until it has shed enough mass that the thrust can overcome the weight, definitely not a good design. Exercise: More RocketsThis is a continuation of the previous exercise and most of the relevant background material can be found in Taylor chapter 3.2. Taking the velocity from the previous exercise and integrating over time we find the height $$y(t) = y(t_0=0)+\int_0^tv(t')dt',$$ which gives $$y(t) = v_et\ln{M_0}-v_e\int_0^t \ln{M(t')}dt'-\frac{1}{2}gt^2.$$ To do the integral over time we recall that $M(t')=M_0-\Delta M t'$. We assumed that $\Delta M=k$ is a constant.We use that $M_0-M=kt$ and assume that mass decreases by a constant $k$ times time $t$.We obtain then that the integral gives $$\int_0^t \ln{M(t')}dt' = \int_0^t \ln{(M_0-kt')}dt',$$ and defining the variable $u=M_0-kt'$, with $du=-kdt'$ and the new limits $M_0$ when $t=0$ and $M_0-kt$ when time is equal to $t$, we have $$\int_0^t \ln{M(t')}dt' = \int_0^t \ln{(M_0-kt')}dt'=-\frac{1}{k}\int_{M_0}^{M_0-kt} \ln{(u)}du=-\frac{1}{k}\left[u\ln{(u)}-u\right]_{M_0}^{M_0-kt},$$ and writing out we obtain $$-\frac{1}{k}\left[u\ln{(u)}-u\right]_{M_0}^{M_0-kt} = \frac{1}{k}\left(M_0\ln{M_0}-M\ln{M}\right)-t,$$ Mulitplying with $-v_e$ we have $$-\frac{v_e}{k}\left(M_0\ln{M_0}-M\ln{M}\right)+v_et,$$ which we can rewrite as, using $M_0=M+kt$, $$-\frac{v_e}{k}\left((M+kt)\ln{M_0}-M\ln{M}\right)+v_et=v_et-v_et\ln{M_0}-\frac{Mv_e}{k}\ln{(\frac{M_0}{M})}.$$ Inserting into $y(t)$ we obtain then $$y(t) = v_et-\frac{1}{2}gt^2-\frac{Mv_e}{k}\ln{(\frac{M_0}{M})}.$$ Using the numbers from the previous exercise with $t=2$ min we obtain that $y\approx 40$ km.For exercise 3.14 (5pt) we have the equation of motion which reads $Ma=kv_e-bv$ or $$\frac{Mdv}{kv_e-bv}=dt.$$ We have that $dM/dt =-k$ (assumed a constant rate for mass change). 
We can then replace $dt$ by $-dM/k$ and we have $$\frac{kdv}{kv_e-bv}=-\frac{dM}{M}.$$ Integrating gives $$v = \frac{kv_e}{b}\left[1-(\frac{M}{M_0})^{b/k}\right].$$ Exercise: Center of massTaylor exercise 3.20. Here Taylor's chapter 3.3 can be of use. This relation will turn out to be very useful when we discuss systems of many classical particles.The definition of the center of mass for $N$ objects can be written as $$M\boldsymbol{R}=\sum_{i=1}^Nm_i\boldsymbol{r}_i,$$ where $m_i$ and $\boldsymbol{r}_i$ are the masses and positions of object $i$, respectively.Assume now that we have a collection of $N_1$ objects with masses $m_{1i}$ and positions $\boldsymbol{r}_{1i}$with $i=1,\dots,N_1$ and a collection of $N_2$ objects with masses $m_{2j}$ and positions $\boldsymbol{r}_{2j}$with $j=1,\dots,N_2$.The total mass of the two-body system is $M=M_1+M_2=\sum_{i=1}^{N_1}m_{1i}+\sum_{j=1}^{N_2}m_{2j}$. The center of mass position $\boldsymbol{R}$ of the whole system satisfies then $$M\boldsymbol{R}=\sum_{i=1}^{N_1}m_{1i}\boldsymbol{r}_{1i}+\sum_{j=1}^{N_2}m_{2j}\boldsymbol{r}_{2j}=M_1\boldsymbol{R}_1+M_2\boldsymbol{R}_2,$$ where $\boldsymbol{R}_1$ and $\boldsymbol{R}_2$ are the the center of mass positions of the two separate bodies and the second equality follows from our rewritten definition of the center of mass applied to each body separately. This is the required result. Exercise: The Earth-Sun problemWe start with the Earth-Sun system in two dimensions only. The gravitational force $F_G$ on the earth from the sun is $$\boldsymbol{F}_G=-\frac{GM_{\odot}M_E}{r^3}\boldsymbol{r},$$ where $G$ is the gravitational constant, $$M_E=6\times 10^{24}\mathrm{Kg},$$ the mass of Earth, $$M_{\odot}=2\times 10^{30}\mathrm{Kg},$$ the mass of the Sun and $$r=1.5\times 10^{11}\mathrm{m},$$ is the distance between Earth and the Sun. The latter defines what we call an astronomical unit **AU**.From Newton's second law we have then for the $x$ direction $$\frac{d^2x}{dt^2}=-\frac{F_{x}}{M_E},$$ and $$\frac{d^2y}{dt^2}=-\frac{F_{y}}{M_E},$$ for the $y$ direction.Here we will use that $x=r\cos{(\theta)}$, $y=r\sin{(\theta)}$ and $$r = \sqrt{x^2+y^2}.$$ We can rewrite these equations $$F_{x}=-\frac{GM_{\odot}M_E}{r^2}\cos{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}x,$$ and $$F_{y}=-\frac{GM_{\odot}M_E}{r^2}\sin{(\theta)}=-\frac{GM_{\odot}M_E}{r^3}y,$$ as four first-order coupled differential equations $$\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,$$ and $$\frac{dx}{dt}=v_x,$$ and $$\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,$$ and $$\frac{dy}{dt}=v_y.$$ The four coupled differential equations $$\frac{dv_x}{dt}=-\frac{GM_{\odot}}{r^3}x,$$ and $$\frac{dx}{dt}=v_x,$$ and $$\frac{dv_y}{dt}=-\frac{GM_{\odot}}{r^3}y,$$ and $$\frac{dy}{dt}=v_y,$$ can be turned into dimensionless equations or we can introduce astronomical units with $1$ AU = $1.5\times 10^{11}$. 
Using the equations from circular motion (with $r =1\mathrm{AU}$) $$\frac{M_E v^2}{r} = F = \frac{GM_{\odot}M_E}{r^2},$$ we have $$GM_{\odot}=v^2r,$$ and using that the velocity of Earth (assuming circular motion) is$v = 2\pi r/\mathrm{yr}=2\pi\mathrm{AU}/\mathrm{yr}$, we have $$GM_{\odot}= v^2r = 4\pi^2 \frac{(\mathrm{AU})^3}{\mathrm{yr}^2}.$$ The four coupled differential equations can then be discretized using Euler's method as (with step length $h$) $$v_{x,i+1}=v_{x,i}-h\frac{4\pi^2}{r_i^3}x_i,$$ and $$x_{i+1}=x_i+hv_{x,i},$$ and $$v_{y,i+1}=v_{y,i}-h\frac{4\pi^2}{r_i^3}y_i,$$ and $$y_{i+1}=y_i+hv_{y,i},$$ The code here implements Euler's method for the Earth-Sun system using a more compact way of representing the vectors. Alternatively, you could have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ as one-dimensional arrays.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
DeltaT = 0.01
#set up arrays
tfinal = 10 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
r[i+1] = r[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
###Output
_____no_output_____
###Markdown
We notice here that Euler's method doesn't give a stable orbit with, for example, $\Delta t =0.01$. It means that we cannot trust Euler's method. Euler's method does not conserve energy. It is an example of an integrator which is not [symplectic](https://en.wikipedia.org/wiki/Symplectic_integrator). Here we present thus two methods which, with simple changes, allow us to avoid these pitfalls. The simplest possible extension is the so-called Euler-Cromer method. The changes we need to make to our code are indeed marginal here. We need simply to replace
###Code
r[i+1] = r[i] + DeltaT*v[i]
###Output
_____no_output_____
###Markdown
in the above code with the velocity at the new time $t_{i+1}$
###Code
r[i+1] = r[i] + DeltaT*v[i+1]
###Output
_____no_output_____
###Markdown
By this simple caveat we get stable orbits. Below we derive the Euler-Cromer method as well as one of the most utilized algorithms for solving the above type of problems, the so-called Velocity-Verlet method. Let us repeat Euler's method. We have a differential equation $$\begin{equation} y'(t_i)=f(t_i,y_i) \label{_auto7} \tag{14}\end{equation}$$ and if we truncate at the first derivative, we have from the Taylor expansion $$y_{i+1}=y(t_i) + (\Delta t) f(t_i,y_i) + O(\Delta t^2),$$ which when complemented with $t_{i+1}=t_i+\Delta t$ forms the algorithm for the well-known Euler method. Note that at every step we make an approximation error of the order of $O(\Delta t^2)$; however, the total error is the sum over all steps $N=(b-a)/(\Delta t)$ for $t\in [a,b]$, yielding thus a global error which goes like $NO(\Delta t^2)\approx O(\Delta t)$. To make Euler's method more precise we can obviously decrease $\Delta t$ (increase $N$), but this can lead to loss of numerical precision. Euler's method is not recommended for precision calculations, although it is handy to use in order to get a first view of how a solution may look. Euler's method is asymmetric in time, since it uses information about the derivative at the beginning of the time interval. This means that we evaluate the position at $y_1$ using the velocity at $v_0$. A simple variation is to determine $x_{n+1}$ using the velocity at $v_{n+1}$, that is (in a slightly more generalized form) $$\begin{equation} y_{n+1}=y_{n}+ (\Delta t) v_{n+1}+O(\Delta t^2)\label{_auto8} \tag{15}\end{equation}$$ and $$\begin{equation} v_{n+1}=v_{n}+(\Delta t) a_{n}+O(\Delta t^2).\label{_auto9} \tag{16}\end{equation}$$ The acceleration $a_n$ is a function $a_n(y_n, v_n, t_n)$ and needs to be evaluated as well. This is the Euler-Cromer method. It is easy to change the above code and see that with the same time step we get stable results. Let us stay with $x$ (position) and $v$ (velocity) as the quantities we are interested in. We have the Taylor expansion for the position given by $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_i+O((\Delta t)^3).$$ The corresponding expansion for the velocity is $$v_{i+1} = v_i+(\Delta t)a_i+\frac{(\Delta t)^2}{2}v^{(2)}_i+O((\Delta t)^3).$$ Via Newton's second law we normally have an analytical expression for the derivative of the velocity, namely $$a_i= \frac{d^2 x}{dt^2}\vert_{i}=\frac{d v}{dt}\vert_{i}= \frac{F(x_i,v_i,t_i)}{m}.$$ If we add to this the corresponding expansion for the derivative of the velocity $$v^{(1)}_{i+1} = a_{i+1}= a_i+(\Delta t)v^{(2)}_i+O((\Delta t)^2),$$ and retain only terms up to the second derivative of the velocity since our error goes as $O(h^3)$, we have $$(\Delta t)v^{(2)}_i\approx a_{i+1}-a_i.$$ We can then rewrite the Taylor expansion for the velocity as $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left( a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Our final equations for the position and the velocity become then $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),$$ and $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Note well that the term $a_{i+1}$ depends on the position $x_{i+1}$. This means that you need to calculate the position at the updated time $t_{i+1}$ before computing the next velocity. Note also that the derivative of the velocity at the time $t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well. 
We can now easily add the Verlet method to our original code as
###Code
DeltaT = 0.01
#set up arrays
tfinal = 10
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
# Set up the gravitational force; note that we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EarthSunVV")
plt.show()
###Output
_____no_output_____
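###Markdown
A small addition that reuses the position array r computed in the cell above: for a stable, nearly circular orbit the distance to the Sun should stay very close to 1 AU throughout the run.
###Code
# distance to the Sun at every time step of the Velocity-Verlet run above
rabs_all = np.sqrt(np.sum(r*r, axis=1))
print(rabs_all.min(), rabs_all.max())
###Output
_____no_output_____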
###Markdown
You can easily generalize the calculation of the forces by defining a function which takes in as input the various variables. We leave this as a challenge to you. Running the above code for various time steps we see that the Velocity-Verlet method is fully stable. We can also play around with different initial conditions in order to find the escape velocity from an orbit around the sun with distance one astronomical unit, 1 AU. The theoretical value for the escape velocity is given by $$v = \sqrt{\frac{8\pi^2}{r}},$$ and with $r=1$ AU, this means that the escape velocity is $2\pi\sqrt{2}$ AU/yr. To obtain this we required that the kinetic energy of Earth equals the potential energy given by the gravitational force. Setting $$\frac{1}{2}M_{\mathrm{Earth}}v^2=\frac{GM_{\odot}M_{\mathrm{Earth}}}{r},$$ and with $GM_{\odot}=4\pi^2$ we obtain the above relation for the velocity. Setting an initial velocity equal to, say, $9$ in the above code yields a planet (Earth) which escapes a stable orbit around the sun, as seen by running the code here.
###Code
DeltaT = 0.01
#set up arrays
tfinal = 100
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
# setting initial velocity larger than escape velocity
v0 = np.array([0.0,9.0])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
# Set up the gravitational force; note that we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
# Plot position as function of time
fig, ax = plt.subplots()
ax.set_xlabel('x[AU]')
ax.set_ylabel('y[AU]')
ax.plot(r[:,0], r[:,1])
fig.tight_layout()
save_fig("EscapeEarthSunVV")
plt.show()
###Output
_____no_output_____
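###Markdown
A quick check of the escape condition in the scaled units used in these codes ($GM_{\odot}=4\pi^2$, Earth mass set to one, and relying on the math import from the earlier cells): the total energy per unit mass at $r=1$ AU is $E=\frac{1}{2}v^2-4\pi^2/r$, and the orbit is unbound when $E>0$. The small addition below evaluates $E$ for the circular-orbit speed $2\pi$, the escape speed $2\pi\sqrt{2}$ and the value $9$ used above.
###Code
# total energy per unit mass at r = 1 AU for three initial speeds (scaled units)
for v0y in [2*pi, 2*pi*sqrt(2), 9.0]:
    E = 0.5*v0y**2 - 4*pi*pi
    print(v0y, E)
###Output
_____no_output_____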
###Markdown
Exercise Conservative forcesWhich of the following force are conservative? All three forces depend only on $\boldsymbol{r}$ and satisfy the first condition for being conservative.* $\boldsymbol{F}=k(x\boldsymbol{i}+2y\boldsymbol{j}+3z\boldsymbol{k})$ where $k$ is a constant.The **curl** is zero and the force is conservative. The potential energy is upon integration $V(x)=-k(1/2x^2+y^2+3/2z^2)$. Taking the derivative shows that this is indeed the case since it gives back the force.* $\boldsymbol{F}=y\boldsymbol{i}+x\boldsymbol{j}+0\boldsymbol{k}$. This force is also conservative since it depends only on the coordinates and its curl is zero. To find the potential energy, since the integral is path independent, we can choose to integrate along any direction. The simplest is start from $x=0$ as origin and follow a path along the $x$-axis (which gives zero) and then parallel to the $y$-axis, which results in $V(x,y) = -xy$. Taking the derivative with respect to $x$ and $y$ gives us back the expression for the force.* $\boldsymbol{F}=k(-y\boldsymbol{i}+x\boldsymbol{j}+0\boldsymbol{k})$ where $k$ is a constant.Here the **curl** is $(0,0,2)$ and the force is not conservative.* 2d For those which are conservative, find the corresponding potential energy $V$ and verify that direct differentiation that $\boldsymbol{F}=-\boldsymbol{\nabla} V$.See the answers to each exercise above. Exercise: The Lennard-Jones potential[The Lennard-Jones potential](https://en.wikipedia.org/wiki/Lennard-Jones_potential) is often used to describethe interaction between two atoms or ions or molecules. If you end up doing materals science and molecular dynamics calculations, it is very likely that you will encounter this potential model.The expression for the potential energy is $$V(r) = V_0\left((\frac{a}{r})^{12}-(\frac{b}{r})^{6}\right),$$ where $V_0$, $a$ and $b$ are constants and the potential depends only on the relative distance between two objects$i$ and $j$, that is $r=\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert=\sqrt{(x_i-x_j)^2+(y_i-y_j)^2+(z_i-z_j)^2}$.* Sketch/plot the potential (choose some values for the constants in doing so).The following Python code plots the potential
###Code
# Common imports
import numpy as np
from math import *
import matplotlib.pyplot as plt
Deltar = 0.01
#set up arrays
rinitial = 1.8
rfinal = 3.
n = ceil((rfinal-rinitial)/Deltar)
r = np.zeros(n)
for i in range(n):
r[i] = rinitial+i*Deltar
V = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
a = 2.0
b = 2.0
V0 = 10.0
V = V0*((a/r)**(12)-(b/r)**6)
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_ylabel('V')
ax.set_xlabel('r')
ax.plot(r, V)
fig.tight_layout()
plt.show()
###Output
_____no_output_____
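###Markdown
A small numerical cross-check (an addition reusing r, V, a and b from the cell above): the location of the minimum of the plotted potential should agree with the analytical value $r=2^{1/6}a^2/b$ derived below.
###Code
# numerically locate the minimum of the plotted Lennard-Jones potential
imin = np.argmin(V)
print(r[imin], 2**(1.0/6.0)*a*a/b)
###Output
_____no_output_____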
###Markdown
* Find and classify the equilibrium points.Here there is only one equilibrium point when we take the derivative of the potential with respect to the relative distance.The derivative with respect to $r$, the relative distance, is $$\frac{dV}{dr} = -6V_0\left(2\frac{a^{12}}{r^{13}}-\frac{b^6}{r^7}\right),$$ and this is zero when $$r = 2^{1/6}\frac{a^2}{b}.$$ If we choose $a=2$ and $b=2$ then $r=2\times 2^{1/6}$. Since the second derivative is positive for all $r$ for our choices of $a$ and $b$ (convince yourself about this), then this value of $r$ has to correspond to a minimum of the potential. This agrees with our graph from the figure above (run the code to produce the figure). * What is the force acting on one of the objects (an atom for example) from the other object? Is this a conservative force?From the previous exercise we have $$\frac{dV}{dr} = -6V_0\left(2\frac{a^{12}}{r^{13}}-\frac{b^6}{r^7}\right).$$ We need the gradient and since the force on particle $i$ is given by $\boldsymbol{F}_i=\boldsymbol{\nabla}_i V(\boldsymbol{r}_i-\boldsymbol{r}_j)$, we obtain $$\boldsymbol{F}_i=6V_0\left(2(\frac{a}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^{12}-(\frac{b}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^6\right)\frac{\boldsymbol{r}_i-\boldsymbol{r}_j}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert^2}.$$ Here $r = \vert\vert \boldsymbol{r}_i-\boldsymbol{r}_j\vert \vert$.If we have more than two particles, we need to sum over all other particles $j$. We have thus to introduce a sum over all particles $N$. The force on particle $i$ at position $\boldsymbol{r}_i$ from all particles $j$ at their positions $\boldsymbol{r}_j$ results in the equation of motion (note that we have divided by the mass $m$ here) $$\boldsymbol{a}_i=\frac{d^2\boldsymbol{r}_i}{dt^2} = \frac{6V_0}{m} \sum_{j \neq i}^{N}\left(2(\frac{a}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^{12}-(\frac{b}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert})^6\right)\frac{\boldsymbol{r}_i-\boldsymbol{r}_j}{\vert\vert\boldsymbol{r}_i-\boldsymbol{r}_j\vert\vert^2}.$$ This is also a conservative force, with zero **curl** as well. Exercise: particle in a new potentialConsider a particle of mass $m$ moving in a one-dimensional potential, $$V(x)=-\alpha\frac{x^2}{2}+\beta\frac{x^4}{4}.$$ * Plot the potential and discuss eventual equilibrium points. Is this a conservative force?The following Python code gives a plot of potential
###Code
# Common imports
import numpy as np
from math import *
import matplotlib.pyplot as plt
Deltax = 0.01
#set up arrays
xinitial = -2.0
xfinal = 2.0
n = ceil((xfinal-xinitial)/Deltax)
x = np.zeros(n)
for i in range(n):
x[i] = xinitial+i*Deltax
V = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
alpha = 0.81
beta = 0.5
print(sqrt(alpha/beta))
V = -alpha*x*x*0.5 + beta*(x**4)*0.25
# Plot position as function of time
fig, ax = plt.subplots()
#ax.set_xlim(0, tfinal)
ax.set_xlabel('x')
ax.set_ylabel('V[s]')
ax.plot(x, V)
fig.tight_layout()
plt.show()
###Output
1.2727922061357855
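###Markdown
As a symbolic cross-check of the analysis that follows (an addition, assuming sympy is available), the sketch below finds the stationary points of $V(x)=-\alpha x^2/2+\beta x^4/4$ and evaluates the second derivative at the minimum, which gives the effective spring constant $2\alpha$ used later.
###Code
import sympy as sp

xs = sp.symbols('x', real=True)
al, be = sp.symbols('alpha beta', positive=True)
Vs = -al*xs**2/2 + be*xs**4/4
dV = sp.diff(Vs, xs)
print(sp.solve(dV, xs))                           # x = 0 and x = +-sqrt(alpha/beta)
d2V = sp.diff(Vs, xs, 2)
print(sp.simplify(d2V.subs(xs, sp.sqrt(al/be))))  # 2*alpha at the minimum
###Output
_____no_output_____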
###Markdown
Here we have chosen $\alpha=0.81$ and $\beta=0.5$. Taking the derivative of $V$ with respect to $x$ gives two minima (and it is easy to see here that the second derivative is positive) at $x\pm\sqrt{\alpha/\beta}$ and a maximum at $x=0$. The derivative is $$\frac{dV}{dx} = -\alpha x + \beta x^3,$$ which gives when we require that it should equal zero the above values. As we can see from the plot (run the above Python code), we have two so-called stable equilibrium points (where the potential has its minima) and an unstable equilibrium point.The force is conservative since it depends only on $x$ and has a **curl** which is zero.* Compute the second derivative of the potential and find its miminum position(s). Using the Taylor expansion of the potential around its minimum (see Taylor section 5.1) to define a spring constant $k$. Use the spring constant to find the natural (angular) frequency $\omega_0=\sqrt{k/m}$. We call the new spring constant for an effective spring constant.In the solution to the previous exercise we listed the values where the derivatives of the potential are zero.Taking the second derivatives we have that $$\frac{d^2V}{dx^2} = -\alpha + 3\beta x^2,$$ and for $\alpha,\beta > 0$ (we assume they are positive constants) we see that when $x=0$ that the the second derivative is negative, which means this is a maximum. For $x=\pm\sqrt{\alpha/\beta}$ we see that the second derivative is positive. Thus these points correspond to two minima.Assume now we Taylor-expand the potential around one of these minima, say $x_{\mathrm{min}}=\sqrt{\alpha/\beta}$. We have thus $$V(x) = V(x_{\mathrm{min}})+(x-x_{\mathrm{min}})\frac{dV}{dx}\vert_{x_{\mathrm{min}}}+\frac{1}{2}(x-x_{\mathrm{min}})^2\frac{d^2V}{dx^2}\vert_{x_{\mathrm{min}}}+\dots$$ Since we are at point where the first derivative is zero and inserting the value for the second derivative of $V$, keeping only terms up to the second derivative and finally taking the derivative with respect to $x$, we find the expression for the force $$F(x) = -(x-x_{\mathrm{min}})\frac{d^2V}{dx^2}\vert_{x_{\mathrm{min}}},$$ and setting in the expression for the second derivative at the minimum we find $$F(x) = -2\alpha(x-\sqrt{\frac{\alpha}{\beta}}).$$ Thus our effective spring constant $k=2\alpha$.* We ignore the second term in the potential energy and keep only the term proportional to the effective spring constant, that is a force $F\propto kx$. Find the acceleration and set up the differential equation. Find the general analytical solution for these harmonic oscillations. You don't need to find the constants in the general solution.Here we simplify our force by rescaling our zeroth point so that we have a force (setting $x_{\mathrm{min}}=0$) $$F(x) = -kx,$$ with $k=2\alpha$. Defining a natural frequency $\omega_0 = \sqrt{k/m}$, where $m$ is the mass of our particle, we have the following equation of motion $$\frac{d^2x}{dt^2}=-\omega_0^2x,$$ which has as analytical solution $x(t)=A\cos{(\omega_0t)}+B\sin{(\omega_0t)}$ and velocity$x(t)=-\omega_0A\sin{(\omega_0t)}+\omega_0B\cos{(\omega_0t)}$. The initial conditions are used to define $A$ and $B$. Exercise: Testing Energy conservationThe code here implements Euler's method for the Earth-Sun system usinga more compact way of representing the vectors. Alternatively, youcould have spelled out all the variables $v_x$, $v_y$, $x$ and $y$ asone-dimensional arrays. 
It tests conservation of potential and kinetic energy as functions of time, in addition to the total energy, again as a function of time. **Note**: in all codes we have used scaled equations so that the gravitational constant times the mass of the sun is given by $4\pi^2$ and the mass of the earth is set to **one** in the calculations of kinetic and potential energies. Else, we would get very large results.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
# Initial values, time step, positions and velocites
DeltaT = 0.0001
#set up arrays
tfinal = 100 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# setting up the kinetic, potential and total energy, note only functions of time
EKinetic = np.zeros(n)
EPotential = np.zeros(n)
ETotal = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Setting up variables for the calculation of energies
# distance that defines rabs in potential energy
rabs0 = sqrt(sum(r[0]*r[0]))
# Initial kinetic energy. Note that we skip the mass of the Earth here, that is MassEarth=1 in all codes
EKinetic[0] = 0.5*sum(v0*v0)
# Initial potential energy (note negative sign, why?)
EPotential[0] = -4*pi*pi/rabs0
# Initial total energy
ETotal[0] = EPotential[0]+EKinetic[0]
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update Energies, velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
r[i+1] = r[i] + DeltaT*v[i]
t[i+1] = t[i] + DeltaT
EKinetic[i+1] = 0.5*sum(v[i+1]*v[i+1])
EPotential[i+1] = -4*pi*pi/sqrt(sum(r[i+1]*r[i+1]))
ETotal[i+1] = EPotential[i+1]+EKinetic[i+1]
# Plot energies as functions of time
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, EKinetic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('Kinetic energy')
axs[1].plot(t, EPotential)
axs[1].set_ylabel('Potential Energy')
axs[2].plot(t, ETotal)
axs[2].set_xlabel('Time [yr]')
axs[2].set_ylabel('Total Energy')
fig.tight_layout()
save_fig("EarthSunEuler")
plt.show()
###Output
_____no_output_____
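###Markdown
As a small addition (a minimal sketch using the same scaled units and the numpy and math imports from the cells above), the snippet below reruns the forward Euler integration for a few time steps and prints the maximum deviation of the total energy from its initial value; it anticipates the question posed next.
###Code
# maximum energy drift of forward Euler for several time steps
def euler_energy_drift(DeltaT, tfinal=10):
    n = ceil(tfinal/DeltaT)
    r = np.array([1.0, 0.0])
    v = np.array([0.0, 2*pi])
    E0 = 0.5*sum(v*v) - 4*pi*pi/sqrt(sum(r*r))
    drift = 0.0
    for i in range(n-1):
        rabs = sqrt(sum(r*r))
        a = -4*pi*pi*r/rabs**3
        # forward Euler: position uses the old velocity
        r, v = r + DeltaT*v, v + DeltaT*a
        E = 0.5*sum(v*v) - 4*pi*pi/sqrt(sum(r*r))
        drift = max(drift, abs(E - E0))
    return drift

for dt in [0.01, 0.001, 0.0001]:
    print(dt, euler_energy_drift(dt))
###Output
_____no_output_____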
###Markdown
We see very clearly that Euler's method does not conserve energy!! Try to reduce the time step $\Delta t$. What do you see? With the Euler-Cromer method, the only thing we need is to update the position at a time $t_{i+1}$ with the updated velocity from the same time. Thus, the change in the code is extremely simple, and **energy is suddenly conserved**. Note that the error runs like $O(\Delta t)$ and this is why we see the larger oscillations. But within this oscillating energy envelope, we see that the energies swing between a max and a min value and never exceed these values.
###Code
# Common imports
import numpy as np
import pandas as pd
from math import *
import matplotlib.pyplot as plt
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
# Initial values, time step, positions and velocites
DeltaT = 0.0001
#set up arrays
tfinal = 100 # in years
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# setting up the kinetic, potential and total energy, note only functions of time
EKinetic = np.zeros(n)
EPotential = np.zeros(n)
ETotal = np.zeros(n)
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# Setting up variables for the calculation of energies
# distance that defines rabs in potential energy
rabs0 = sqrt(sum(r[0]*r[0]))
# Initial kinetic energy. Note that we skip the mass of the Earth here, that is MassEarth=1 in all codes
EKinetic[0] = 0.5*sum(v0*v0)
# Initial potential energy
EPotential[0] = -4*pi*pi/rabs0
# Initial total energy
ETotal[0] = EPotential[0]+EKinetic[0]
# Start integrating using Euler's method
for i in range(n-1):
# Set up the acceleration
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using Euler's forward method
v[i+1] = v[i] + DeltaT*a
# Only change when we add the Euler-Cromer method
r[i+1] = r[i] + DeltaT*v[i+1]
t[i+1] = t[i] + DeltaT
EKinetic[i+1] = 0.5*sum(v[i+1]*v[i+1])
EPotential[i+1] = -4*pi*pi/sqrt(sum(r[i+1]*r[i+1]))
ETotal[i+1] = EPotential[i+1]+EKinetic[i+1]
# Plot energies as functions of time
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, EKinetic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('Kinetic energy')
axs[1].plot(t, EPotential)
axs[1].set_ylabel('Potential Energy')
axs[2].plot(t, ETotal)
axs[2].set_xlabel('Time [yr]')
axs[2].set_ylabel('Total Energy')
fig.tight_layout()
save_fig("EarthSunEulerCromer")
plt.show()
###Output
_____no_output_____
###Markdown
Our final equations for the position and the velocity become then $$x_{i+1} = x_i+(\Delta t)v_i+\frac{(\Delta t)^2}{2}a_{i}+O((\Delta t)^3),$$ and $$v_{i+1} = v_i+\frac{(\Delta t)}{2}\left(a_{i+1}+a_{i}\right)+O((\Delta t)^3).$$ Note well that the term $a_{i+1}$ depends on the position $x_{i+1}$. This means that you need to calculate the position at the updated time $t_{i+1}$ before computing the next velocity. Note also that the derivative of the velocity at the time $t_i$ used in the updating of the position can be reused in the calculation of the velocity update as well. We can now easily add the Verlet method to our original code as
###Code
DeltaT = 0.001
#set up arrays
tfinal = 100
n = ceil(tfinal/DeltaT)
# set up arrays for t, a, v, and x
t = np.zeros(n)
v = np.zeros((n,2))
r = np.zeros((n,2))
# Initial conditions as compact 2-dimensional arrays
r0 = np.array([1.0,0.0])
v0 = np.array([0.0,2*pi])
r[0] = r0
v[0] = v0
Fourpi2 = 4*pi*pi
# setting up the kinetic, potential and total energy, note only functions of time
EKinetic = np.zeros(n)
EPotential = np.zeros(n)
ETotal = np.zeros(n)
# Setting up variables for the calculation of energies
# distance that defines rabs in potential energy
rabs0 = sqrt(sum(r[0]*r[0]))
# Initial kinetic energy. Note that we skip the mass of the Earth here, that is MassEarth=1 in all codes
EKinetic[0] = 0.5*sum(v0*v0)
# Initial potential energy
EPotential[0] = -4*pi*pi/rabs0
# Initial total energy
ETotal[0] = EPotential[0]+EKinetic[0]
# Start integrating using the Velocity-Verlet method
for i in range(n-1):
# Set up the gravitational force; note that we need the norm of the position vector
# Here you could have defined your own function for this
rabs = sqrt(sum(r[i]*r[i]))
a = -Fourpi2*r[i]/(rabs**3)
# update velocity, time and position using the Velocity-Verlet method
r[i+1] = r[i] + DeltaT*v[i]+0.5*(DeltaT**2)*a
rabs = sqrt(sum(r[i+1]*r[i+1]))
anew = -4*(pi**2)*r[i+1]/(rabs**3)
v[i+1] = v[i] + 0.5*DeltaT*(a+anew)
t[i+1] = t[i] + DeltaT
EKinetic[i+1] = 0.5*sum(v[i+1]*v[i+1])
EPotential[i+1] = -4*pi*pi/sqrt(sum(r[i+1]*r[i+1]))
ETotal[i+1] = EPotential[i+1]+EKinetic[i+1]
# Plot energies as functions of time
fig, axs = plt.subplots(3, 1)
axs[0].plot(t, EKinetic)
axs[0].set_xlim(0, tfinal)
axs[0].set_ylabel('Kinetic energy')
axs[1].plot(t, EPotential)
axs[1].set_ylabel('Potential Energy')
axs[2].plot(t, ETotal)
axs[2].set_xlabel('Time [yr]')
axs[2].set_ylabel('Total Energy')
fig.tight_layout()
save_fig("EarthSunVelocityVerlet")
plt.show()
###Output
_____no_output_____ |
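###Markdown
As a final small addition to this example (it reuses the ETotal array from the cell above), we can quantify how well the Velocity-Verlet run conserves the total energy by printing the maximum deviation from its initial value, both in absolute terms and relative to $|E(0)|$.
###Code
# maximum deviation of the total energy from its initial value in the run above
drift = np.max(np.abs(ETotal - ETotal[0]))
print(drift, abs(ETotal[0]), drift/abs(ETotal[0]))
###Output
_____no_output_____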
Sorting/4. Merge_Sort.ipynb | ###Markdown
Merge Sort
###Code
def merge_sort(array):
if len(array) < 2:
return array
mid = len(array) // 2
left = merge_sort(array[:mid])
right = merge_sort(array[mid:])
return merge(left, right)
def merge(left, right):
result = []
i, j = 0, 0
while i < len(left) or j < len(right):
if left[i] <= right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
if i == len(left) or j == len(right):
result.extend(left[i:] or right[j:])
break
return result
###Output
_____no_output_____
###Markdown
Time Complexity:- Best Case: O(n log2(n))- Average Case: O(n log2(n))- Worst Case: O(n log2(n)) Why O(n log n)? If you are given two sorted arrays (say A & B) of length n/2, then it takes O(n) time to merge them into one sorted array of length n. But if A and B are not sorted, then we need to sort them first. For this we first divide arrays A and B of length n/2 each into two arrays of length n/4 and suppose these two arrays are already sorted. Merging two sorted arrays of length n/4 to build array A of length n/2 takes O(n/2) time, and similarly building array B also takes O(n/2) time. So the total time to build both A and B is again O(n). Hence at every stage the work is O(n), and the total time for merge sort is O(no. of stages * n). Here we divide the array into two parts at every stage and continue dividing until each divided array has length one. So if the length of the array is eight, we need to divide it three times to get arrays of length one, like this: 8 = 4+4 = 2+2+2+2 = 1+1+1+1+1+1+1+1. So no. of stages = log2(8) = 3. That is why merge sort is O(n log(n)), with log2(n) stages. Code for executing and seeing the difference in time complexities Best Case Performance:
###Code
# elements are already sorted
array = [i for i in range(1, 20)]
print(array)
# merge sort still does about n*log2(n) work even on already sorted input (19 elements here)
print(merge_sort(array))
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
###Markdown
Average Case Performance:
###Code
import random
# elements are randomly shuffled
array = [i for i in range(1, 20)]
random.shuffle(array)
print(array)
# merge sort also does about n*log2(n) work on randomly shuffled input (19 elements here)
print(merge_sort(array))
###Output
[5, 2, 17, 15, 3, 13, 9, 12, 7, 19, 11, 18, 14, 10, 1, 16, 4, 8, 6]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
###Markdown
Worst Case Performance:
###Code
# elements are reverse sorted
array = [i for i in range(1, 20)]
# reversing the array
array = array[::-1]
print(array)
# even for reverse sorted input the work stays about n*log2(n) (19 elements here)
print(merge_sort(array))
###Output
[19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
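###Markdown
To summarize the complexity argument given earlier as a recurrence (added here for reference): merging costs linear time and each half is sorted recursively, so the running time satisfies $$T(n) = 2\,T(n/2) + c\,n,\qquad T(1) = c,$$ which unrolls into $\log_2 n$ levels of roughly $c\,n$ work each, giving $$T(n) = c\,n\log_2 n + c\,n = O(n\log n)$$ for every input order, consistent with the three demos above.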
|
cnn/cats-and-dogs/cats-and-dogs-transfer-learning.ipynb | ###Markdown
Introducing Data Augmentation Imports
###Code
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.utils import np_utils
from keras.callbacks import Callback
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
from tensorflow.python.keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib import rcParams
from time import time
import os
import wandb
from wandb.keras import WandbCallback
import numpy as np
###Output
Using TensorFlow backend.
###Markdown
Logging code
###Code
TENSORBOARD_LOGS_DIR = f"dandc-{int(time())}"
tensorboard = TensorBoard(log_dir=f"logs/{TENSORBOARD_LOGS_DIR}", write_images=True, histogram_freq=1, write_grads=True)
# run = wandb.init()
# config = run.config
###Output
_____no_output_____
###Markdown
Network Configuration
###Code
config = {
'img_width': 150,
'img_height': 150,
'first_layer_conv_width': 3,
'first_layer_conv_height': 3,
'dense_layer_size': 512,
'epochs': 100,
'optimizer': "adam",
'hidden_nodes': 100
}
###Output
_____no_output_____
###Markdown
Images Dataset
###Code
# use a raw string so the backslashes in the Windows path are not treated as escape characters
base_dir = r'E:\kaggle\dogs-and-cats\sm_dataset'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
###Output
_____no_output_____
###Markdown
Data Generation
###Code
train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(validation_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
###Output
Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
###Markdown
Inspect Generators
###Code
for data_batch, labels_batch in train_generator:
print('data batch shape:', data_batch.shape)
print('labels batch shape:', labels_batch.shape)
break
%matplotlib inline
rcParams['figure.figsize'] = 11, 8
fig, ax = plt.subplots(1,4)
for data_batch, labels_batch in train_generator:
ax[0].imshow(data_batch[0])
ax[1].imshow(data_batch[1])
ax[2].imshow(data_batch[2])
ax[3].imshow(data_batch[3])
break
###Output
_____no_output_____
###Markdown
Transfer Convolutional Base
###Code
conv_base = VGG16(
weights = 'imagenet',
include_top = False,
input_shape = (150, 150, 3)
)
conv_base.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 150, 150, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 150, 150, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 150, 150, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 75, 75, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 75, 75, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 75, 75, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 37, 37, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 37, 37, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 37, 37, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 37, 37, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 18, 18, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 9, 9, 512) 0
_________________________________________________________________
block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808
_________________________________________________________________
block5_pool (MaxPooling2D) (None, 4, 4, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
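###Markdown
Note: the `conv_base` loaded above is not attached to the model defined in the next section, which is trained from scratch. For reference only, a minimal sketch of how the frozen VGG16 base could be wired to a small classifier head is shown below; the `transfer_model` name and the 256-unit dense layer are illustrative choices, not part of the original notebook, and the cell reuses `Sequential`, `Flatten` and `Dense` imported earlier.
###Code
# Illustrative sketch (not executed as part of the original run):
# stack the pre-trained convolutional base under a small dense classifier
transfer_model = Sequential()
transfer_model.add(conv_base)            # VGG16 feature extractor
transfer_model.add(Flatten())
transfer_model.add(Dense(256, activation='relu'))
transfer_model.add(Dense(1, activation='sigmoid'))
# freeze the ImageNet weights so only the new head is trained
conv_base.trainable = False
transfer_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
###Output
_____no_output_____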
###Markdown
Create Model
###Code
model = Sequential()
model.add(Conv2D(32,
(config['first_layer_conv_width'], config['first_layer_conv_height']),
input_shape=(config['img_width'], config['img_height'], 3),
activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(config['dense_layer_size'], activation='relu'))
#model.add(Dropout(0.4))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_9 (Conv2D) (None, 148, 148, 32) 896
_________________________________________________________________
max_pooling2d_9 (MaxPooling2 (None, 74, 74, 32) 0
_________________________________________________________________
conv2d_10 (Conv2D) (None, 72, 72, 64) 18496
_________________________________________________________________
max_pooling2d_10 (MaxPooling (None, 36, 36, 64) 0
_________________________________________________________________
conv2d_11 (Conv2D) (None, 34, 34, 128) 73856
_________________________________________________________________
max_pooling2d_11 (MaxPooling (None, 17, 17, 128) 0
_________________________________________________________________
conv2d_12 (Conv2D) (None, 15, 15, 128) 147584
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, 7, 7, 128) 0
_________________________________________________________________
flatten_3 (Flatten) (None, 6272) 0
_________________________________________________________________
dropout_4 (Dropout) (None, 6272) 0
_________________________________________________________________
dense_5 (Dense) (None, 512) 3211776
_________________________________________________________________
dense_6 (Dense) (None, 1) 513
=================================================================
Total params: 3,453,121
Trainable params: 3,453,121
Non-trainable params: 0
_________________________________________________________________
###Markdown
Training
###Code
class TensorBoardWrapper(TensorBoard):
'''Sets the self.validation_data property for use with TensorBoard callback.'''
def __init__(self, batch_gen, nb_steps, b_size, **kwargs):
super(TensorBoardWrapper, self).__init__(**kwargs)
self.batch_gen = batch_gen # The generator.
self.nb_steps = nb_steps # Number of times to call next() on the generator.
#self.batch_size = b_size
def on_epoch_end(self, epoch, logs):
# Fill in the `validation_data` property. Obviously this is specific to how your generator works.
# Below is an example that yields images and classification tags.
# After it's filled in, the regular on_epoch_end method has access to the validation_data.
imgs, tags = None, None
for s in range(self.nb_steps):
ib, tb = next(self.batch_gen)
if imgs is None and tags is None:
imgs = np.zeros(((self.nb_steps * self.batch_size,) + ib.shape[1:]), dtype=np.float32)
tags = np.zeros(((self.nb_steps * self.batch_size,) + tb.shape[1:]), dtype=np.uint8)
imgs[s * ib.shape[0]:(s + 1) * ib.shape[0]] = ib
tags[s * tb.shape[0]:(s + 1) * tb.shape[0]] = tb
self.validation_data = [imgs, tags, np.ones(imgs.shape[0])]
return super(TensorBoardWrapper, self).on_epoch_end(epoch, logs)
tbw = TensorBoardWrapper(validation_generator, nb_steps=50 // 20, b_size=20, log_dir='./log',
histogram_freq=1,
write_graph=False, write_grads=True)
history = model.fit_generator(train_generator, epochs=config['epochs'], steps_per_epoch=100, validation_data=validation_generator, validation_steps=50, callbacks=[])
###Output
Epoch 1/100
100/100 [==============================] - 18s 178ms/step - loss: 0.6958 - acc: 0.4975 - val_loss: 0.6930 - val_acc: 0.5000
Epoch 2/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6929 - acc: 0.5155 - val_loss: 0.6928 - val_acc: 0.4990
Epoch 3/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6935 - acc: 0.4940 - val_loss: 0.6923 - val_acc: 0.5010
Epoch 4/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6926 - acc: 0.4980 - val_loss: 0.6922 - val_acc: 0.5090
Epoch 5/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6918 - acc: 0.5010 - val_loss: 0.6868 - val_acc: 0.5500
Epoch 6/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6933 - acc: 0.5125 - val_loss: 0.6900 - val_acc: 0.5320
Epoch 7/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6908 - acc: 0.5285 - val_loss: 0.6875 - val_acc: 0.5400
Epoch 8/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6894 - acc: 0.5375 - val_loss: 0.6896 - val_acc: 0.5400
Epoch 9/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6922 - acc: 0.5285 - val_loss: 0.6864 - val_acc: 0.5580
Epoch 10/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6900 - acc: 0.5330 - val_loss: 0.6896 - val_acc: 0.5150
Epoch 11/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6900 - acc: 0.5275 - val_loss: 0.6821 - val_acc: 0.5540
Epoch 12/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6926 - acc: 0.5045 - val_loss: 0.6892 - val_acc: 0.5000
Epoch 13/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6885 - acc: 0.5235 - val_loss: 0.6866 - val_acc: 0.5440
Epoch 14/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6901 - acc: 0.5215 - val_loss: 0.6915 - val_acc: 0.5160
Epoch 15/100
100/100 [==============================] - 16s 162ms/step - loss: 0.6894 - acc: 0.5175 - val_loss: 0.6853 - val_acc: 0.5430
Epoch 16/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6890 - acc: 0.5380 - val_loss: 0.6895 - val_acc: 0.5340
Epoch 17/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6899 - acc: 0.5335 - val_loss: 0.6876 - val_acc: 0.5260
Epoch 18/100
100/100 [==============================] - 16s 162ms/step - loss: 0.6884 - acc: 0.5390 - val_loss: 0.6856 - val_acc: 0.5620
Epoch 19/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6825 - acc: 0.5620 - val_loss: 0.6876 - val_acc: 0.5210
Epoch 20/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6829 - acc: 0.5500 - val_loss: 0.6815 - val_acc: 0.5750
Epoch 21/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6671 - acc: 0.5980 - val_loss: 0.6614 - val_acc: 0.6270
Epoch 22/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6551 - acc: 0.6320 - val_loss: 0.6776 - val_acc: 0.5460
Epoch 23/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6491 - acc: 0.6245 - val_loss: 0.6743 - val_acc: 0.6070
Epoch 24/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6364 - acc: 0.6230 - val_loss: 0.6365 - val_acc: 0.6360
Epoch 25/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6283 - acc: 0.6530 - val_loss: 0.6232 - val_acc: 0.6600
Epoch 26/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6095 - acc: 0.6565 - val_loss: 0.6299 - val_acc: 0.6760
Epoch 27/100
100/100 [==============================] - 16s 163ms/step - loss: 0.6096 - acc: 0.6700 - val_loss: 0.6221 - val_acc: 0.6750
Epoch 28/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6161 - acc: 0.6745 - val_loss: 0.6262 - val_acc: 0.6810
Epoch 29/100
100/100 [==============================] - 16s 162ms/step - loss: 0.6008 - acc: 0.6785 - val_loss: 0.6237 - val_acc: 0.6660
Epoch 30/100
100/100 [==============================] - 16s 161ms/step - loss: 0.5938 - acc: 0.6890 - val_loss: 0.6198 - val_acc: 0.6810
Epoch 31/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5762 - acc: 0.6955 - val_loss: 0.6670 - val_acc: 0.6240
Epoch 32/100
100/100 [==============================] - 16s 161ms/step - loss: 0.6226 - acc: 0.6590 - val_loss: 0.5886 - val_acc: 0.6820
Epoch 33/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5856 - acc: 0.6910 - val_loss: 0.5872 - val_acc: 0.7100
Epoch 34/100
100/100 [==============================] - 16s 161ms/step - loss: 0.5694 - acc: 0.7040 - val_loss: 0.6280 - val_acc: 0.6630
Epoch 35/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5745 - acc: 0.6995 - val_loss: 0.5910 - val_acc: 0.7050
Epoch 36/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5666 - acc: 0.7045 - val_loss: 0.5914 - val_acc: 0.7200
Epoch 37/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5540 - acc: 0.7160 - val_loss: 0.5869 - val_acc: 0.6970
Epoch 38/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5474 - acc: 0.7200 - val_loss: 0.6178 - val_acc: 0.6900
Epoch 39/100
100/100 [==============================] - 16s 163ms/step - loss: 0.5485 - acc: 0.7265 - val_loss: 0.5738 - val_acc: 0.7120
Epoch 40/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5402 - acc: 0.7345 - val_loss: 0.5718 - val_acc: 0.7180
Epoch 41/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5374 - acc: 0.7415 - val_loss: 0.5832 - val_acc: 0.7220
Epoch 42/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5303 - acc: 0.7435 - val_loss: 0.5779 - val_acc: 0.7160
Epoch 43/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5202 - acc: 0.7460 - val_loss: 0.5672 - val_acc: 0.7330
Epoch 44/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5409 - acc: 0.7295 - val_loss: 0.5662 - val_acc: 0.7130
Epoch 45/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5248 - acc: 0.7360 - val_loss: 0.5386 - val_acc: 0.7390
Epoch 46/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5265 - acc: 0.7415 - val_loss: 0.5579 - val_acc: 0.7290
Epoch 47/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5042 - acc: 0.7530 - val_loss: 0.5492 - val_acc: 0.7410
Epoch 48/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5148 - acc: 0.7485 - val_loss: 0.5560 - val_acc: 0.7500
Epoch 49/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4959 - acc: 0.7610 - val_loss: 0.5701 - val_acc: 0.7410
Epoch 50/100
100/100 [==============================] - 16s 161ms/step - loss: 0.5160 - acc: 0.7510 - val_loss: 0.5779 - val_acc: 0.7250
Epoch 51/100
100/100 [==============================] - 16s 163ms/step - loss: 0.5048 - acc: 0.7425 - val_loss: 0.5384 - val_acc: 0.7560
Epoch 52/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4906 - acc: 0.7625 - val_loss: 0.5775 - val_acc: 0.7100
Epoch 53/100
100/100 [==============================] - 16s 162ms/step - loss: 0.5043 - acc: 0.7570 - val_loss: 0.5203 - val_acc: 0.7510
Epoch 54/100
100/100 [==============================] - 16s 163ms/step - loss: 0.4882 - acc: 0.7765 - val_loss: 0.5286 - val_acc: 0.7560
Epoch 55/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4806 - acc: 0.7775 - val_loss: 0.5725 - val_acc: 0.7420
Epoch 56/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4910 - acc: 0.7625 - val_loss: 0.5023 - val_acc: 0.7580
Epoch 57/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4728 - acc: 0.7735 - val_loss: 0.5232 - val_acc: 0.7610
Epoch 58/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4588 - acc: 0.7840 - val_loss: 0.5003 - val_acc: 0.7640
Epoch 59/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4642 - acc: 0.7885 - val_loss: 0.5052 - val_acc: 0.7710
Epoch 60/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4617 - acc: 0.7745 - val_loss: 0.4926 - val_acc: 0.7740
Epoch 61/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4468 - acc: 0.7895 - val_loss: 0.5536 - val_acc: 0.7710
Epoch 62/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4692 - acc: 0.7755 - val_loss: 0.5323 - val_acc: 0.7600
Epoch 63/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4525 - acc: 0.7915 - val_loss: 0.5278 - val_acc: 0.7780
Epoch 64/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4352 - acc: 0.7990 - val_loss: 0.4930 - val_acc: 0.7710
Epoch 65/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4351 - acc: 0.8065 - val_loss: 0.4860 - val_acc: 0.7720
Epoch 66/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4211 - acc: 0.8070 - val_loss: 0.4819 - val_acc: 0.7820
Epoch 67/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4356 - acc: 0.8065 - val_loss: 0.4870 - val_acc: 0.7830
Epoch 68/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4307 - acc: 0.7985 - val_loss: 0.5019 - val_acc: 0.7850
Epoch 69/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4327 - acc: 0.8025 - val_loss: 0.4645 - val_acc: 0.8020
Epoch 70/100
100/100 [==============================] - 16s 162ms/step - loss: 0.4106 - acc: 0.8140 - val_loss: 0.5007 - val_acc: 0.7790
Epoch 71/100
100/100 [==============================] - 16s 163ms/step - loss: 0.4138 - acc: 0.8070 - val_loss: 0.5941 - val_acc: 0.7510
Epoch 72/100
100/100 [==============================] - 16s 164ms/step - loss: 0.4318 - acc: 0.8085 - val_loss: 0.5491 - val_acc: 0.7670
Epoch 73/100
100/100 [==============================] - 16s 163ms/step - loss: 0.4198 - acc: 0.8130 - val_loss: 0.5108 - val_acc: 0.7790
Epoch 74/100
100/100 [==============================] - 16s 163ms/step - loss: 0.3947 - acc: 0.8305 - val_loss: 0.4982 - val_acc: 0.7910
Epoch 75/100
100/100 [==============================] - 16s 163ms/step - loss: 0.3889 - acc: 0.8140 - val_loss: 0.4818 - val_acc: 0.8020
Epoch 76/100
100/100 [==============================] - 16s 163ms/step - loss: 0.3862 - acc: 0.8215 - val_loss: 0.6651 - val_acc: 0.7400
Epoch 77/100
100/100 [==============================] - 16s 162ms/step - loss: 0.3810 - acc: 0.8325 - val_loss: 0.5519 - val_acc: 0.7690
Epoch 78/100
100/100 [==============================] - 16s 163ms/step - loss: 0.3977 - acc: 0.8195 - val_loss: 0.5263 - val_acc: 0.7880
Epoch 79/100
100/100 [==============================] - 16s 163ms/step - loss: 0.4025 - acc: 0.8215 - val_loss: 0.4674 - val_acc: 0.7920
Epoch 80/100
100/100 [==============================] - 16s 163ms/step - loss: 0.3771 - acc: 0.8260 - val_loss: 0.5375 - val_acc: 0.7580
Epoch 81/100
100/100 [==============================] - 16s 165ms/step - loss: 0.4006 - acc: 0.8165 - val_loss: 0.4415 - val_acc: 0.8040
Epoch 82/100
100/100 [==============================] - 16s 162ms/step - loss: 0.3858 - acc: 0.8255 - val_loss: 0.4753 - val_acc: 0.7930
Epoch 83/100
100/100 [==============================] - 16s 163ms/step - loss: 0.3814 - acc: 0.8325 - val_loss: 0.5088 - val_acc: 0.7850
Epoch 84/100
16/100 [===>..........................] - ETA: 4s - loss: 0.4039 - acc: 0.8219
###Markdown
Save model
###Code
model.save('candd01.h5')
###Output
_____no_output_____
###Markdown
Loss & Accuracy
###Code
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training Acc')
plt.plot(epochs, val_acc, 'b', label='Validation Acc')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Training and Validation loss
###Code
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
###Output
_____no_output_____ |
_notebooks/2021-11-23-ecommerce-EDA-analysis.ipynb | ###Markdown
"Online Store Visitors Behavioral Analysis"> "Exploring 20M rows of online store data"- toc: false- branch: master- badges: true- hide_github_badge: true- comments: true- categories: [pandas, EDA]- image: images/ecommerce-analytics.jpg- use_plotly: true- hide: false- search_exclude: false- metadata_key1: ecommerce_analysis- metadata_key2: Pandas A while ago, I found [this dataset](https://www.kaggle.com/mkechinov/ecommerce-events-history-in-cosmetics-shop) on **Kaggle** which contains 20M user events from an online store. Its size over two Gigabite and the owner challenged data scientists to dig and find insights. so I took this challenge. the dataset has been separated into five files, each file contains events for an entire month. Starting from Octobor 2019 to Febrary 2020. >Note: If you are a technical girl/guy [click here](https://saaleh2.github.io/ALHODAIF-Portfolio/pandas/eda/2021/11/23/ecommerce-EDA-analysis.htmlLet-us-explore-our-data) to skip to the analysis. For non-technicalsBefore starting this analysis I will explain some technical terms so they become easy to digest while reading this post. - __events record__ First of all, what is **event**? an **event** is a recod of user actions in apps and websites. Each click you do in apps and websites will be recorded for future analysis and other purposes. The following example is simplifying how events recorded in a databases:
###Code
#hide_input
# example data
example = {'event time':['2019/1/1 - 8:00 PM', '2019/1/1 - 8:05 PM', '2019/1/2 - 1:00 AM', '2019/1/2 - 1:10 AM', '2019/1/3 - 11:00 AM'],
'event type':['add to cart', 'purchase', 'view item', 'add to cart', 'view item'],
'product id':['milk', 'milk', 'Iphone 13', 'Iphone 13', 'soap'],
'category id':['food', 'food', 'electronics', 'electronics', 'cleaners'],
'brand':['Almarai', 'Almarai', 'Apple', 'Apple', 'NaN'],
'price':[10.50, 10.50, 1499, 1499, 15],
'user id':['saleh', 'saleh', 'salman', 'salman', 'saleh'],
'user session':[1115,1115,1125,1125, 2334]}
df = pd.DataFrame(example, index=[1,2,3,4,5])
df
###Output
_____no_output_____
###Markdown
- **How to read the events data?** The `event time` column records when the event happened, and the `event type` column specifies what the user did. The `product id`, `category id`, and `user id` columns are always represented as numbers, but here we used text instead to simplify the example. It's easier for computers to use numbers rather than text, especially when there are duplicates, like when two users have the same name. __Sessions__ Before explaining the `user session` column, let's clear up what a **session** means. A **session** is a group of user interactions with the website or app that take place within 30 minutes. So let's say you opened the Noon app at 8:00 PM and kept viewing items until 8:29 PM. All events recorded during this period will be given a single **session** reference number. When you open Noon again the next day, it will be considered a new session and a new reference number will be assigned. Notice how the `user id` "saleh" got a new `user session` reference number after coming back on 2019/1/3. Empty values: If you look back at row number 5 you will see that the `brand` cells are **_NaN_**. NaN is actually not a brand, it simply means an empty cell. Sometimes you will see **_null_** instead, but they are the same thing. ___ Let us explore our data: We're going first to look at our data structure and prepare the data for analysis. >Tip: Non-technicals can skip the code in the black boxes and read the outputs (results) under each one.
###Code
#collapse_show
# importing all necessary libraries for this analysis.
import pandas as pd
import numpy as np
import plotly.express as px
# Reading the data and taking a quick look at data structure.
oct_file = pd.read_csv(r'D:\saleh\kaggle\2019-Oct.csv')
nov_file = pd.read_csv(r'D:\saleh\kaggle\2019-Nov.csv')
dec_file = pd.read_csv(r'D:\saleh\kaggle\2019-Dec.csv')
jan_file = pd.read_csv(r'D:\saleh\kaggle\2020-Jan.csv')
feb_file = pd.read_csv(r'D:\saleh\kaggle\2020-Feb.csv')
dataset = pd.concat([oct_file, nov_file, dec_file, jan_file, feb_file], ignore_index=True)
dataset.head()
###Output
_____no_output_____
###Markdown
The dataset has 9 columns, and each row contains a user action like adding an item to the cart or viewing an item. At a glance, we see that our dataset needs some manipulation and cleaning. The `brand` column has empty values, but we will check in a minute how many empty cells are out there. Now let's look at the columns' data types. It's important to make sure that data are given the right type so our tools don't mess up when dealing with them.
###Code
#collapse_show
# Calling dataset info
dataset.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 20692840 entries, 0 to 20692839
Data columns (total 9 columns):
# Column Dtype
--- ------ -----
0 event_time object
1 event_type object
2 product_id int64
3 category_id int64
4 category_code object
5 brand object
6 price float64
7 user_id int64
8 user_session object
dtypes: float64(1), int64(3), object(5)
memory usage: 1.4+ GB
###Markdown
Here, the `event_time` column has been assigned the `object` type, which will make our life harder when we do time-based analysis. So we will convert it to the `datetime` data type:
###Code
#collapse_show
# changing event_time column from object to DateTime column
dataset['event_time'] = pd.to_datetime(dataset['event_time'])
###Output
_____no_output_____
###Markdown
Next we will check empty values.
###Code
#collapse_show
print('_______empty values_______\n', dataset.isna().sum())
###Output
_______empty values_______
event_time 0
event_type 0
product_id 0
category_id 0
category_code 20339246
brand 8757117
price 0
user_id 0
user_session 4598
dtype: int64
###Markdown
The `category_code` and `brand` columns are 98% and 43% empty respectively, so we will get rid of them to make my cute machine fly faster.
###Code
#collapse_show
# dropping "category_code" and "brand" column
dataset.drop(columns=['category_code', 'brand'], inplace=True)
###Output
_____no_output_____
###Markdown
Lastly, let's check if there are any outliers so that we don't fall into that old mistake. **"garbage in -> garbage out"**
###Code
#collapse_show
# counting number of events per user
dataset.groupby('user_id', as_index=False).agg(number_of_events=(
'event_type', 'count')).nlargest(10, 'number_of_events').reset_index()
###Output
_____no_output_____
###Markdown
The first user has an enormous number of events, which could mean that it's not a human, or that it's one of the employees doing their work using a user account. The possibilities are endless, so we will exclude this user anyway.
###Code
#collapse_show
# users list
outliers = [527021202]
# excluding outliers
dataset.drop(dataset[dataset['user_id'].isin(outliers)].index, inplace=True)
###Output
_____no_output_____
###Markdown
___ What are we looking for?__Good analysis comes out from good questions.__ For any online store, the most important thing is __conversion rate__. which basically means: out of all visitors to the website, how many of them actually placed orders. Therefore, we will build our analysis around that and try to find insights that can help the online store to understand their visitors' behavior and make better campaigns. What is the current conversion rate?__"If you can't measure it you can't manage it".__ Knowing the current state will help the stakeholders to measure whether if they are making any difference after using these analysis insights or not.
###Code
#collapse_show
# subsetting purchase events
# I found purchases with negative values, mostly it's returned orders.
purchases = dataset.query('event_type == "purchase" & price > 0')
# counting the total number of orders
orders_count = len(purchases['user_session'].unique())
# counting number of visitors
visitors_count = len(dataset['user_id'].unique())
# calculating conversion rate (number of orders / number of visitors)
conv_rate = round((orders_count / visitors_count),2)
print(f'Current conversion rate is:\033[1m %{conv_rate}')
###Output
Current conversion rate is:[1m %0.09
###Markdown
Which is below any industry average. For more on average conversion rate by industry check out [this](https://www.invespcro.com/blog/the-average-website-conversion-rate-by-industry/) informative post.___ When do users visit the store?Finding when users visit the store is crucial for digital marketing teams in many ways. The team can adjust their campaigns during these times which will lead to better targeting new customers and reduce the cost of their campaigns. In general, we want to know when users visit the store, and whether their behavior changes during weekends or not. Most active day of the week
###Code
#collapse_show
# Create 'date' column
dataset['date'] = dataset['event_time'].dt.date
# unique users in each day
unieq_users = dataset.drop_duplicates(subset=['user_id', 'date']).copy()
# Create 'dayname' column to group by it
unieq_users['dayname'] = unieq_users['event_time'].dt.day_name()
#aggregating data
count_vistors = unieq_users['dayname'].value_counts()
#visualizing data
days_order = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
fig = px.bar(count_vistors, x=count_vistors.index, y=count_vistors.values, template='plotly_white')
fig.update_layout(xaxis={'title':'', 'categoryorder':'array', 'categoryarray':days_order},
yaxis={'title':'Number of visitors'}, bargap=0.4)
fig.show()
###Output
_____no_output_____
###Markdown
Suprisengly, the most active days are the weekdays, not the weekends. What are the peak times?Let's be more specific and find out when do users visit the store during the day. we're going to split weekdays from the weekends since weekends tend to have different routines and potentially dissimilar behavior.
###Code
#collapse_show
# Creating hour and 'day of week' column
dataset['Hour'] = dataset['event_time'].dt.hour
dataset['d_of_week'] = dataset['event_time'].dt.weekday
# tagging each event as 'Weekend' or 'Weekday' time
# vectorized with np.where instead of looping over ~20M rows with iterrows()
dataset['Wend/Wday'] = np.where(dataset['d_of_week'] < 5, 'Weekday', 'Weekend')
###Output
_____no_output_____
###Markdown
Now we visualize peak times.
###Code
#collapse_show
# count the number of users per hour
peak_times = dataset.groupby(['Hour', 'Wend/Wday'], as_index=False).agg(
{'user_id': pd.Series.nunique}).rename(columns={'user_id': 'Number of visitors'})
# plotting data
fig2 = px.line(peak_times, x='Hour', y='Number of visitors', color='Wend/Wday',
markers=True, symbol='Wend/Wday', template='plotly_white')
fig2.show()
###Output
_____no_output_____
###Markdown
We have slightly different behavior between weekends and weekdays: weekdays have two peaks during the day, but both lines drop dramatically after 8 pm. ___ Top 10 products: One of the ways to raise the conversion rate is to expand the range of products. A key input to that is finding the top-selling products and spotting which of them can be expanded. Now we will find the top 10 products and their categories.
###Code
#collapse_show
# count top 10 products
top_products = purchases.groupby('product_id', as_index=False)[
'event_type'].count().nlargest(10, 'event_type')
# subsetting 'product_id' & 'category_id' columns
prod_cat = purchases[[
'product_id', 'category_id']].drop_duplicates('product_id')
# selecting top 10 products with their category
top_10 = prod_cat[prod_cat['product_id'].isin(
top_products['product_id'])].reset_index(drop=True)
top_10.index += 1
top_10
###Output
_____no_output_____
###Markdown
Out of the top 10 products, eight start with **148758** `category_id`, which represents the key number of the main category (ex. Food), and then the subcategory number comes after(ex. Rice). ___ Average session durationAnother important KPI in ecommerce is the **average session duration**. a session is a group of user interactions with the website that take place within 30 minutes. sadly, the sessions of this store were not registered in the right way. many sessions lasted 151 days and others have 0-second duration as shown below. so we can not use this data in our analysis.
###Code
#collapse_show
# extracting start time and end time of each session
ses_st_end = dataset.groupby('user_session', as_index=False).agg(
min_=('event_time', np.min), max_=('event_time', np.max))
# subtract end from start to get session duration
ses_st_end['session_duration'] = ses_st_end['max_'] - ses_st_end['min_']
# select only needed columns
ses_duration = ses_st_end.loc[:, ['user_session', 'session_duration']].sort_values(
'session_duration', ascending=False).reset_index(drop=True)
ses_duration
###Output
_____no_output_____ |
docs/lectures/lecture25/notebook/feature_importance.ipynb | ###Markdown
Title: **Exercise: Feature Importance** Description: The goal of this exercise is to compare two feature importance methods: MDI and Permutation Importance. For a discussion on the merits of each see. Instructions:- Read the dataset `heart.csv` as a pandas dataframe, and take a quick look at the data.- Assign the predictor and response variables as per the instructions given in the scaffold.- Set a max_depth value.- Define a `DecisionTreeClassifier` and fit on the entire data.- Define a `RandomForestClassifier` and fit on the entire data.- Calculate Permutation Importance for each of the two models. Remember that the MDI is automatically computed by sklearn when you call the classifiers.- Use the routines provided to display the feature importance as bar plots. The plots will look similar to the one given above. Hints: forest.feature_importances_ : Calculate the impurity-based feature importance. sklearn.inspection.permutation_importance() : Calculate the permutation-based feature importance. sklearn.RandomForestClassifier() : Returns a random forest classifier object. sklearn.DecisionTreeClassifier() : Returns a decision tree classifier object. NOTE - MDI is automatically computed by sklearn by calling RandomForestClassifier and/or DecisionTreeClassifier.
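###Markdown
Before the scaffold, here is a rough conceptual sketch (for illustration only, not part of the exercise) of what permutation importance does: shuffle one predictor at a time and record how much the model's score drops. The names below (`manual_permutation_importance`, `fitted_model`, `X_df`, `y_true`) are made up for this illustration; in the exercise itself you are expected to use `sklearn.inspection.permutation_importance` as hinted above.
###Code
import numpy as np

def manual_permutation_importance(fitted_model, X_df, y_true, n_repeats=5, seed=0):
    # baseline score on the unshuffled data
    rng = np.random.RandomState(seed)
    baseline = fitted_model.score(X_df, y_true)
    importances = {}
    for col in X_df.columns:
        drops = []
        for _ in range(n_repeats):
            X_perm = X_df.copy()
            # break the association between this column and the response
            X_perm[col] = rng.permutation(X_perm[col].values)
            drops.append(baseline - fitted_model.score(X_perm, y_true))
        # average drop in score is the importance of the column
        importances[col] = np.mean(drops)
    return importances
###Output
_____no_output_____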
###Code
# Import the necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.inspection import permutation_importance
from sklearn.tree import DecisionTreeClassifier
from helper import plot_permute_importance, plot_feature_importance
%matplotlib inline
# Read the dataset and take a quick look
df = pd.read_csv("heart.csv")
df.head()
# Assign the predictor and response variables.
# 'AHD' is the response and all the other columns are the predictors
X = ___
y = ___
# Set the parameters
# The random state is fixed for testing purposes
random_state = 44
# Choose a `max_depth` for your trees
max_depth = ___
###Output
_____no_output_____
###Markdown
SINGLE TREE
###Code
### edTest(test_decision_tree) ###
# Define a Decision Tree classifier with random_state as the above defined variable
# Set the maximum depth to be max_depth
tree = __
# Fit the model on the entire data
tree.fit(X, y);
# Using Permutation Importance to get the importance of features for the Decision Tree
# With random_state as the above defined variable
tree_result = ___
###Output
_____no_output_____
###Markdown
RANDOM FOREST
###Code
### edTest(test_random_forest) ###
# Define a Random Forest classifier with random_state as the above defined variable
# Set the maximum depth to be max_depth and use 10 estimators
forest = ___
# Fit the model on the entire data
forest.fit(X, y);
# Use Permutation Importance to get the importance of features for the Random Forest model
# With random_state as the above defined variable
forest_result = ___
###Output
_____no_output_____
###Markdown
PLOTTING THE FEATURE RANKING
###Code
# Use the helper code given to visualize the feature importance using 'MDI'
plot_feature_importance(tree,forest,X,y);
# Use the helper code given to visualize the feature importance using 'permutation feature importance'
plot_permute_importance(tree_result,forest_result,X,y);
###Output
_____no_output_____ |
tests/ConvLSTM_emotion_train.ipynb | ###Markdown
Copyright Netherlands eScience Center and Centrum Wiskunde & Informatica ** Function : Emotion recognition and forecast with ConvLSTM** ** Author : Yang Liu** ** Contributor : Tianyi Zhang (Centrum Wiskunde & Informatica)** Last Update : 2021.02.08 ** ** Last Update : 2021.02.12 ** ** Library : PyTorch, Numpy, os, DLACs, matplotlib **Description : This notebook serves to test the prediction skill of deep neural networks in emotion recognition and forecast. The convolutional Long Short-Term Memory neural network is used to deal with this spatial-temporal sequence problem. We use PyTorch as the deep learning framework. ** Many to one prediction.** Return Values : Time series and figures **This project is a joint venture between NLeSC and CWI** The method comes from the study by Shi et al. (2015) Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting.
###Code
%matplotlib inline
import sys
import numbers
import pickle
# for data loading
import os
# for pre-processing and machine learning
import numpy as np
import csv
#import sklearn
from scipy.signal import resample
#import scipy
import torch
import torch.nn.functional
sys.path.append("../")
import nemo
import nemo.ConvLSTM
import nemo.function
import nemo.metric
# for visualization
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
###Output
_____no_output_____
###Markdown
The testing device is a Dell Inspiron 5680 with an Intel Core i7-8700 x64 CPU and an Nvidia GTX 1060 6GB GPU. Here is a benchmark comparing CPU vs. GTX 1060: https://www.analyticsindiamag.com/deep-learning-tensorflow-benchmark-intel-i5-4210u-vs-geforce-nvidia-1060-6gb/
###Code
#################################################################################
######### datapath ########
#################################################################################
# please specify data path
datapath = 'C:\\Users\\nosta\\NEmo\\Data_CASE'
output_path = 'C:\\Users\\nosta\\NEmo\\results'
model_path = 'C:\\Users\\nosta\\NEmo\\models'
# please specify the constants for input data
window_size = 2000 # down-sampling constant
seq = 20
v_a = 0 # valance = 0, arousal = 1
# leave-one-out training and testing
num_s = 2
def load_data_new(window_size, num_s, v_a, datapath):
"""
For long cuts of data and the type is pickle.
"""
f = open(os.path.join(datapath, 'data_{}s'.format(int(window_size/100))),'rb')
data = pickle.load(f)
f.close()
samples = data["Samples"]
labels = data["label_s"]
subject_id = data["Subject_id"]
x_train = samples[np.where(subject_id!=num_s)[0],:,0:4]
x_test = samples[np.where(subject_id==num_s)[0],:,0:4]
y_train = np.zeros([0,int(window_size/seq),1])
y_test = np.zeros([0,int(window_size/seq),1])
for i in range(len(labels)):
sig = resample(labels[i][:,v_a],int(window_size/seq)).reshape([1,-1,1])/9
if subject_id[i] == num_s:
y_test = np.concatenate([y_test,sig],axis = 0)
else:
y_train = np.concatenate([y_train,sig],axis = 0)
return x_train, x_test, y_train, y_test
if __name__=="__main__":
print ('*********************** extract variables *************************')
#################################################################################
######### data gallery #########
#################################################################################
x_train, x_test, y_train, y_test = load_data_new(window_size, num_s, v_a, datapath)
#x_train, x_test, y_train, y_test = load_data_old(window_size, num_s, datapath)
# first check of data shape
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
#################################################################################
######### pre-processing #########
#################################################################################
# choose the target dimension for reshaping of the signals
batch_train_size, sample_x_size, channels = x_train.shape
batch_test_size, sample_x_size, _ = x_test.shape
_, sample_y_size, _ = y_train.shape
x_dim = 5
y_dim = 5
series_x_len = sample_x_size // (y_dim * x_dim)
series_y_len = sample_y_size // (y_dim * x_dim)
# reshape the input and labels
sample_train_xy = np.reshape(x_train,[batch_train_size, series_x_len, y_dim, x_dim, channels])
sample_test_xy = np.reshape(x_test,[batch_test_size, series_x_len, y_dim, x_dim, channels])
label_c_train_xy = np.reshape(y_train,[batch_train_size, series_y_len, y_dim, x_dim])
label_c_test_xy = np.reshape(y_test,[batch_test_size, series_y_len, y_dim, x_dim])
#################################################################################
######### normalization #########
#################################################################################
print('================ extract individual variables =================')
sample_1 = sample_train_xy[:,:,:,:,0]
sample_2 = sample_train_xy[:,:,:,:,1]
sample_3 = sample_train_xy[:,:,:,:,2]
sample_4 = sample_train_xy[:,:,:,:,3]
sample_1_test = sample_test_xy[:,:,:,:,0]
sample_2_test = sample_test_xy[:,:,:,:,1]
sample_3_test = sample_test_xy[:,:,:,:,2]
sample_4_test = sample_test_xy[:,:,:,:,3]
# using indicator for training
# video_label_3D = np.repeat(video_label[:,np.newaxis,:],series_len,1)
# video_label_4D = np.repeat(video_label_3D[:,:,np.newaxis,:],y_dim,2)
# video_label_xy = np.repeat(video_label_4D[:,:,:,np.newaxis,:],x_dim,3)
# video_label_xy.astype(float)
# length diff for training
len_diff = series_x_len / series_y_len
# for the output at certain loop iteration
diff_output_iter = np.arange(len_diff-1, series_x_len, len_diff)
print(diff_output_iter)
# first check of data shape
print(label_c_train_xy.shape)
print(sample_1.shape)
###Output
(232, 4, 5, 5)
(232, 80, 5, 5)
###Markdown
Procedure for ConvLSTM ** We use PyTorch to implement the ConvLSTM neural network with the emotion time series data used in this notebook. **
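###Markdown
For reference, the ConvLSTM cell of Shi et al. (2015) cited above replaces the matrix multiplications of a standard LSTM with convolutions, so the hidden state keeps its spatial layout: $$i_t = \sigma(W_{xi} * X_t + W_{hi} * H_{t-1} + W_{ci} \circ C_{t-1} + b_i)$$ $$f_t = \sigma(W_{xf} * X_t + W_{hf} * H_{t-1} + W_{cf} \circ C_{t-1} + b_f)$$ $$C_t = f_t \circ C_{t-1} + i_t \circ \tanh(W_{xc} * X_t + W_{hc} * H_{t-1} + b_c)$$ $$o_t = \sigma(W_{xo} * X_t + W_{ho} * H_{t-1} + W_{co} \circ C_t + b_o)$$ $$H_t = o_t \circ \tanh(C_t)$$ where $*$ denotes convolution and $\circ$ the Hadamard product. Whether the `nemo.ConvLSTM` implementation used below includes the Hadamard "peephole" terms is not checked here; the equations only summarize the cited method.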
###Code
print ('******************* create basic dimensions for tensor and network *********************')
# specifications of neural network
input_channels = 4
hidden_channels = [3, 2, 1] # number of channels & hidden layers, the channels of last layer is the channels of output, too
#hidden_channels = [4, 3, 3, 2, 1]
kernel_size = 1
# here we input a sequence and predict the next step only
learning_rate = 0.001
num_epochs = 50
print (torch.__version__)
# check if CUDA is available
use_cuda = torch.cuda.is_available()
print("Is CUDA available? {}".format(use_cuda))
# CUDA settings torch.__version__ must > 0.4
# !!! This is important for the model!!! The first option is gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print ('******************* cross validation and testing data *********************')
# mini-batch
mini_batch_size = 28
# iterations
# iterations
iterations = batch_train_size // mini_batch_size
if batch_train_size % mini_batch_size != 0:
extra_loop = True
iterations += 1
else:
extra_loop = False
print(extra_loop)
%%time
print ('******************* run ConvLSTM *********************')
print ('The model is designed to make many to one prediction.')
print ('A series of multi-channel variables will be input to the model.')
print ('The model learns by verifying the output at each timestep.')
# check the sequence length
_, sequence_len, height, width = sample_1.shape
#_, sequence_len, height, width = sample_1_norm.shape
# initialize our model
model = nemo.ConvLSTM.ConvLSTM(input_channels, hidden_channels, kernel_size).to(device)
loss_fn = torch.nn.MSELoss(size_average=True)
# stochastic gradient descent
#optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
# Adam optimizer
optimiser = torch.optim.Adam(model.parameters(), lr=learning_rate)
print(model)
print(loss_fn)
print(optimiser)
for name, param in model.named_parameters():
if param.requires_grad:
print (name)
print (param.data)
print (param.size())
print ("=========================")
print('##############################################################')
print('############# preview model parameters matrix ###############')
print('##############################################################')
print('Number of parameter matrices: ', len(list(model.parameters())))
for i in range(len(list(model.parameters()))):
print(list(model.parameters())[i].size())
%%time
print('##############################################################')
print('################## start training loop #####################')
print('##############################################################')
hist = np.zeros(num_epochs * iterations)
# loop of epoch
for t in range(num_epochs):
for i in range(iterations):
# Clear stored gradient
model.zero_grad()
# loop
loop_num = mini_batch_size
if i == iterations - 1:
if extra_loop:
loop_num = batch_train_size % mini_batch_size
for j in range(loop_num):
# loop of timestep
# critical step for verifying the loss
crit_step = 0
for timestep in range(sequence_len):
# hidden state re-initialized inside the model when timestep=0
#################################################################################
######## create input tensor with multi-input dimension ########
#################################################################################
# create variables
x_input = np.stack((sample_1[i*mini_batch_size+j,timestep,:,:],
sample_2[i*mini_batch_size+j,timestep,:,:],
sample_3[i*mini_batch_size+j,timestep,:,:],
sample_4[i*mini_batch_size+j,timestep,:,:])) #vstack,hstack,dstack
x_var = torch.autograd.Variable(torch.Tensor(x_input).view(-1,input_channels,height,width)).to(device)
# Forward pass
y_pred, _ = model(x_var, timestep)
# counter for computing the loss
if timestep in diff_output_iter: # determined by the length of label
#################################################################################
######## create training tensor with multi-input dimension ########
#################################################################################
y_train_stack = np.stack((label_c_train_xy[i*mini_batch_size+j,crit_step,:,:])) #vstack,hstack,dstack
y_var = torch.autograd.Variable(torch.Tensor(y_train_stack).view(-1,hidden_channels[-1],height,width)).to(device)
crit_step = crit_step + 1 # move forward
#################################################################################
# choose training data
y_train = y_var
# torch.nn.functional.mse_loss(y_pred, y_train) can work with (scalar,vector) & (vector,vector)
# Please Make Sure y_pred & y_train have the same dimension
# accumulate loss
if timestep == diff_output_iter[0]:
loss = loss_fn(y_pred, y_train)
else:
loss += loss_fn(y_pred, y_train)
# print loss at certain iteration
if i % 50 == 0:
print("Epoch {} Iteration {} MSE: {:0.3f}".format(t, i, loss.item()))
# Gradcheck requires double precision numbers to run
#res = torch.autograd.gradcheck(loss_fn, (y_pred.double(), y_train.double()), eps=1e-6, raise_exception=True)
#print(res)
hist[i+t*iterations] = loss.item()
# Zero out gradient, else they will accumulate between epochs
optimiser.zero_grad()
# Backward pass
loss.backward(retain_graph=True)
# Update parameters
optimiser.step()
# save the model
if t % 20 == 0:
# (recommended) save the model parameters only
torch.save(model.state_dict(), os.path.join(model_path,'convlstm_emotion_epoch_{}.pkl'.format(t)))
# save the entire model
#torch.save(model, os.path.join(output_path,'convlstm.pkl'))
# save the model
# (recommended) save the model parameters only
torch.save(model.state_dict(), os.path.join(model_path,'convlstm_emotion.pkl'))
# save the entire model
#torch.save(model, os.path.join(output_path,'convlstm.pkl'))
#################################################################################
########### after training statistics ###########
#################################################################################
print ("******************* Loss with time **********************")
fig00 = plt.figure()
plt.plot(hist, 'r', label="Training loss")
plt.xlabel('Iterations')
plt.ylabel('MSE Error')
plt.legend()
plt.tight_layout()
fig00.savefig(os.path.join(output_path,'ConvLSTM_train_mse_error.png'),dpi=150)
print ("******************* Loss with time (log) **********************")
fig01 = plt.figure()
plt.plot(np.log(hist), 'r', label="Training loss")
plt.xlabel('Iterations')
plt.ylabel('Log mse error')
plt.legend()
plt.show()
plt.tight_layout()
fig01.savefig(os.path.join(output_path,'ConvLSTM_train_log_mse_error.png'),dpi=150)
print ('******************* evaluation matrix *********************')
# The prediction will be evaluated with error metrics (RMSE, MAE, MSE)
# error score for temporal-spatial fields, without keeping spatial pattern
def RMSE(x,y):
"""
Calculate the RMSE. x is input series and y is reference series.
It calculates RMSE over the domain, not over time. The spatial structure
will not be kept.
Parameter
----------------------
x: input time series with the shape [time, lat, lon]
"""
x_series = x.reshape(x.shape[0],-1)
y_series = y.reshape(y.shape[0],-1)
rmse = np.sqrt(np.mean((x_series - y_series)**2,1))
    # spread of the absolute error over the domain (axis 1)
    rmse_std = np.std(np.sqrt((x_series - y_series)**2), 1)
return rmse, rmse_std
# error score for temporal-spatial fields, keeping spatial pattern
def MAE(x,y):
"""
Calculate the MAE. x is input series and y is reference series.
It calculate MAE over time and keeps the spatial structure.
"""
mae = np.mean(np.abs(x-y),0)
return mae
def MSE(x, y):
"""
Calculate the MSE. x is input series and y is reference series.
"""
mse = np.mean((x-y)**2)
return mse
%%time
#################################################################################
######## prediction ########
#################################################################################
print('##############################################################')
print('################### start prediction loop ###################')
print('##############################################################')
# forecast array
pred_label = np.zeros((batch_test_size, series_y_len, y_dim, x_dim),dtype=float)
# calculate loss for each sample
hist_label = np.zeros(batch_test_size)
for n in range(batch_test_size):
# Clear stored gradient
model.zero_grad()
# critical step for saving the output
crit_step_pred = 0
for timestep in range(sequence_len):
x_input = np.stack((sample_1_test[n,timestep,:,:],
sample_2_test[n,timestep,:,:],
sample_3_test[n,timestep,:,:],
sample_4_test[n,timestep,:,:]))
x_var_pred = torch.autograd.Variable(torch.Tensor(x_input).view(-1,input_channels,height,width),
requires_grad=False).to(device)
# make prediction
last_pred, _ = model(x_var_pred, timestep)
# counter for prediction
if timestep in diff_output_iter: # determined by the length of label
# GPU data should be transferred to CPU
pred_label[n,crit_step_pred,:,:] = last_pred[0,0,:,:].cpu().data.numpy()
crit_step_pred = crit_step_pred + 1 # move forward
# compute the error for each sample
hist_label[n] = MSE(label_c_test_xy[n,:,:,:], pred_label[n,:,:,:])
# save prediction as npz file
np.savez_compressed(os.path.join(output_path,'ConvLSTM_emotion_pred.npz'),
label_c = pred_label)
# plot the error
print ("******************* Loss with time **********************")
fig00 = plt.figure()
plt.plot(hist_label, 'r', label="Validation loss")
plt.xlabel('Sample')
plt.ylabel('MSE Error')
plt.legend()
plt.tight_layout()
fig00.savefig(os.path.join(output_path,'ConvLSTM_pred_mse_error.png'),dpi=150)
#####################################################################################
######## visualization of prediction and implement metrics ########
#####################################################################################
# compute mse
mse_label = MSE(label_c_test_xy, pred_label)
print(mse_label)
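# Illustrative use of the metric helpers defined above (an added sketch, not part of the original script).
# RMSE averages over everything except the first axis, so applied to the test arrays it yields one value
# per test sample, while MAE keeps the [time, lat, lon] pattern of the mean absolute error.
rmse_label, rmse_label_std = RMSE(label_c_test_xy, pred_label)   # shape: (batch_test_size,)
mae_label = MAE(label_c_test_xy, pred_label)                     # shape: (series_y_len, y_dim, x_dim)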
# save output as csv file
with open(os.path.join(output_path, "MSE_ConvLSTM_emotion.csv"), "wt+") as fp:
writer = csv.writer(fp, delimiter=",")
writer.writerow(["emotion prediction"]) # write header
writer.writerow(["label"])
writer.writerow([mse_label])
###Output
0.02882709742851517
|
notebooks/.ipynb_checkpoints/week2_lecture-checkpoint.ipynb | ###Markdown
## Obtaining Course Materials

The course materials will be updated throughout the course, so we recommend that you download the most recent version of the materials before each lecture or recap session. The latest notebooks and other materials for this course can be obtained by following these steps:

1. Go to the github page for the course: https://github.com/semacu/data-science-python
2. Click on the green "Clone or download" button, which is on the right of the screen above the list of folders and files in the repository. This will cause a drop-down menu to appear.
3. Click on the "Download ZIP" option in the drop-down menu, and a zip file containing the course content will be downloaded to your computer
4. Move the zip file to wherever in your file system you want the course materials to be held, e.g. your home directory
5. Decompress the zip file to get a folder containing the course materials. Depending on your operating system, you may need to double-click the zip file, or issue a command on the terminal. On Windows 10, you can right click, click "Extract All...", click "Extract", and the folder will be decompressed in the same location as the zip file
6. Launch the Jupyter Notebook app. Depending on your operating system, you may be able to search for "Jupyter" in the system menu and click the icon that appears, or you may need to issue a command on the terminal. On Windows, you can hit the Windows key, search for "Jupyter", and click the icon that appears.
7. After launching, the Jupyter notebook home menu will open in your browser. Navigate to the course materials that you decompressed in step 5, and click on the notebook for this week to launch it.

## Functions

### Overview

Functions are a general programming tool, and can be found in many programming languages. All functions operate in three steps:

1. Call: the function is called with some inputs
2. Compute: based on the inputs, the function does something
3. Return: the function outputs the result of the computation

Python has many functions pre-loaded, for example the print function. In this example, the function is **called** with some text as the input, the **computation** is doing something to the text (keeping it the same), and the **return** is the printing of text to screen.
###Code
print("I just called the print function")
###Output
I just called the print function
###Markdown
As well as the input, some functions also have additional options which change how the function works. These additional options are called **arguments**. For example, the print function has a *sep* argument which allows the user to specify which character to use as the text separator.
###Code
print("By", "default", "text", "is", "separated", "by", "spaces")
print("We", "can", "choose", "to", "separate", "by", "dashes", sep="-")
###Output
By default text is separated by spaces
We-can-choose-to-separate-by-dashes
###Markdown
Some useful functions in python are:

- type() : returns the variable type for an object
- len() : returns the length of a list or tuple
- abs() : returns the absolute value of a numeric variable
- sorted() : returns a sorted copy of a list or tuple
- map() : applies a function to every element of a list or tuple, and returns a corresponding sequence (an iterator in Python 3) of the function's return values

## Defining your own function

### Why?

Functions allow you to wrap up a chunk of code and execute it repeatedly without having to type the same code each time. This has four main advantages:

1. Accuracy: by typing the code once, you only have one chance to make a mistake or typo (and if you discover a mistake, you only have to correct it once!)
2. Flexibility: you can change or expand the behaviour of a function by making a change in the function code, and this change will apply across your script
3. Readability: if someone reads your code, they only have to read the function code to understand how it works, instead of reading chunks of repeated code
4. Portability: you can easily copy a function from one script and paste it in another script, instead of having to extract general-purpose lines of code that are embedded in a script written for a specific purpose

### How?

To declare a new function, the **def** statement is used to name the function, specify its inputs, add the computation steps, and specify its output. This results in a **function definition**, and the function can then be called later in the script. All function definitions must have three elements:

1. Name: you can call your function anything, but usually verbs are better than nouns (e.g. echo_text rather than text_echoer). Python will not check whether a function already exists with the name that you choose, so **do not use the same name as an existing function** e.g. print()
2. Input: almost all functions take at least one input, either data to perform computation on, or an option to change how the computation proceeds. You don't have to specify an input, but you **must** add a set of brackets after the function name, whether this contains any inputs or not
3. Return: the return statement is used to signify the end of the function definition. You don't have to return anything at the end of the function, but the return statement must still be there with a set of empty brackets

Below is a simple example of a function that takes one input (the text to be printed), and returns the text to the user. We assign the results of calling the function to a new variable, and print the results. As you can see, every line that you want to include in the function definition must be indented, including the **return()** statement:
###Code
def print_text(input_text):
output_text = input_text
return(output_text)
result = print_text("Hello world")
print(result)
###Output
Hello world
###Markdown
### Variable scope

A key difference between code inside a function and elsewhere in a script is the **variable scope**: that is, where in the script variables can be accessed. If a variable is declared outside a function, it is a **global variable**: it can be accessed from within a function, or outside a function. In contrast, if a variable is declared inside a function, it is a **local variable**: it can only be accessed from inside the function, but not outside. The code below demonstrates this - the *counter* variable is declared inside the function, to keep track of the number of words that have been checked, and it is accessed without trouble by the print message inside the function:
###Code
# this function checks whether each word in a list of words contains a Z
def check_Z(word_list):
Z_status = []
# this counter is a local variable
counter = 0
for i in word_list:
counter += 1
if "Z" in i:
Z_status.append(True)
else:
Z_status.append(False)
# accessing the local variable inside the function works fine
print("Word {} checked".format(counter))
return(Z_status)
check_Z(["Zoo", "Zimbabwe", "Ocelot"])
###Output
Word 1 checked
Word 2 checked
Word 3 checked
###Markdown
However, if we add an extra line at the end of the script which attempts to access the *counter* variable outside the function, we get an error:
###Code
# this function checks whether each word in a list of words contains a Z
def check_Z(word_list):
Z_status = []
# this counter is a local variable
counter = 0
for i in word_list:
counter += 1
if "Z" in i:
Z_status.append(True)
else:
Z_status.append(False)
# accessing the local variable inside the function works fine
print("Word {} checked".format(counter))
return(Z_status)
check_Z(["Zoo", "Zimbabwe", "Ocelot"])
# accessing the local variable outside the function doesn't work
print("Final counter value = {}".format(counter))
###Output
Word 1 checked
Word 2 checked
Word 3 checked
###Markdown
As this shows, variables declared inside a function are effectively invisible to all code that is outside the function. This can be frustrating, but overall it is a very good thing - without this behaviour, we would need to check the name of every variable within every function we are using to ensure that there are no clashes!

## Advanced functions

### Setting defaults for inputs

For some functions, it is useful to specify a default value for an input. This makes it optional for the user to set their own value, but allows the function to run successfully if this input is not specified. To do this, we simply add an equals sign and a value after the input name. For example, we may want to add a *verbose* input to the **print_text()** function to print progress messages while a function runs.
###Code
def print_text(input_text, verbose=False):
if verbose:
print("Reading input text")
output_text = input_text
if verbose:
print("Preparing to return text")
return(output_text)
###Output
_____no_output_____
###Markdown
The default setting for the *verbose* input is *False*, so the behaviour of the function will not change if the *verbose* input is not specified:
###Code
print_text("Hello world")
###Output
_____no_output_____
###Markdown
But we now have the option to provide a value to the *verbose* input, overriding the default value and getting progress messages:
###Code
print_text("Hello world", verbose=True)
###Output
Reading input text
Preparing to return text
###Markdown
If we specify a default value for any input, we must put this input after the other inputs that do not have defaults. For example, the new version of **print_text()** will not work if we switch the order of the inputs in the function definition:
###Code
def print_text(verbose=False, input_text):
if verbose:
print("Reading input text")
output_text = input_text
if verbose:
print("Preparing to return text")
return(output_text)
print_text("Hello world", verbose=True)
###Output
_____no_output_____
###Markdown
### Functions calling other functions

One useful application of functions is to split a complex task into a series of simple steps, each carried out by a separate function, and then write a master function that calls all of those separate functions in the correct order. Having a series of simple functions makes it easier to test and debug each step. In this way, an entire workflow can be constructed that is easy to adjust, and scalable across a large number of datasets. For example, we might have a number of sample names, and for each we want to:

1. Find the longest part of the name
2. Check whether that part begins with a vowel
3. Convert the lowercase letters to uppercase, and vice versa

Once we have done this, we want to gather the converted sample names into a list.
###Code
def find_name_longest_part(input_name):
parts = input_name.split("-")
longest_part = ""
for i in parts:
if len(i) > len(longest_part):
longest_part = i
return(longest_part)
# print(find_name_longest_part("1-32-ALPHA-C"))
def check_start_vowel(input_word):
vowels = ["a", "e", "i", "o", "u"]
word_to_test = input_word.lower()
if word_to_test[0] in vowels:
return(True)
else:
return(False)
# print(check_start_vowel("ALPHA"))
def convert_case(input_word):
output_word = ""
for i in input_word:
if i.islower():
output_word += i.upper()
else:
output_word += i.lower()
return(output_word)
# print(convert_case("ALPha"))
def convert_sample_name_parts(sample_names):
converted_names = []
for i in sample_names:
longest_part = find_name_longest_part(i)
if check_start_vowel(longest_part):
converted_names.append(convert_case(longest_part))
return(converted_names)
convert_sample_name_parts(["1-32-ALPHa-C", "1-33-PHI-omega", "1-34-BETA-sigMA"])
###Output
_____no_output_____
###Markdown
## External functions

### Importing modules

One of the great things about Python is the enormous number of functions that can be loaded and used. By default, Python only loads a basic set of functions when it is launched (or when a Jupyter notebook is opened), but extra functions can be loaded at any time by importing extra **modules**. To import a module, use the **import** command. For example, we can import the **os** module, which contains a number of functions that help us to interact with the operating system:
###Code
import os
###Output
_____no_output_____
###Markdown
Once a module has been imported, all of the functions contained within that module are available to use. For example, the **os** module has a **getcwd()** function, which returns the current working directory. To call this function, we must specify the module name and the function name together, in the form MODULE.FUNCTION:
###Code
os.getcwd()
###Output
_____no_output_____
###Markdown
Some of the most useful data science modules in Python are not included with a standard Python installation, but must be installed separately. If you are using conda, you should always [install modules through conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-pkgs.html) as well.

### Importing specific functions from modules

Some modules are very large, so we may not want to import the whole module if we only need to use one function. Instead, we can specify one or more functions to be loaded using the **from** command:
###Code
from os import getcwd
###Output
_____no_output_____
###Markdown
If a function has been loaded using this syntax, it does **not** need to be prefaced with the module name:
###Code
getcwd()
###Output
_____no_output_____
###Markdown
### Aliasing

Sometimes it is useful to be able to refer to a module by another name in our script, for example to reduce the amount of typing by shortening the name. To do this, we can use the **as** command to assign another name to a module as we import it, a technique known as **aliasing**. For example, we can import the **numpy** module and alias it as **np**:
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Now whenever we want to call a function from this module, we use the alias rather than the original name. For example, the **zeros()** function allows us to create an array of specific proportions, with each element as a zero:
###Code
np.zeros((3,4))
###Output
_____no_output_____
###Markdown
We can also alias a function name if we import it using the **from** command. However, as with naming your own function, it is critical to avoid using the name of a function that already exists:
###Code
from os import getcwd as get_current_wd
get_current_wd()
###Output
_____no_output_____
###Markdown
### What not to do

In some code, you may see all of the functions being imported from a module using the following syntax:
###Code
from os import *
###Output
_____no_output_____
###Markdown
At first glance this looks more convenient than importing the module as a whole, because we no longer need to preface the function name with the module name when calling it:
###Code
getcwd()
###Output
_____no_output_____
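###Markdown
As a cautionary illustration (an added sketch, not part of the original course notes), star-importing a large module such as NumPy silently replaces built-ins such as sum(), which is exactly the problem described next:
###Code
# Hypothetical demonstration of silent name shadowing
print(sum(range(5), -1))   # built-in sum: (0+1+2+3+4) with start=-1 -> 9
from numpy import *        # the star import silently replaces sum with numpy.sum
print(sum(range(5), -1))   # numpy.sum: -1 is now treated as the axis argument -> 10
###Output
9
10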
###Markdown
However, **YOU SHOULD NEVER DO THIS**. This is because if the module contains a function with the same name as a function that is already loaded, Python will replace that pre-loaded function with the new function **without telling you**. This has three consequences:

1. The pre-loaded function is no longer available
2. Debugging is now much more difficult, because you don't know which function is being called
3. If you call the function and expect to get the pre-loaded version, you will get unexpected (and potentially disastrous) behaviour

## Exercises

### Exercise 1

In the code block below, there is a list of 5 protein sequences, specified in the single amino acid code where one letter corresponds to one amino acid. Write a function that finds the most abundant amino acid in a given protein sequence, but prints a warning message if the protein sequence is shorter than 10 amino acids. Run your function on each of the proteins in the list.
###Code
proteins = [
"MEAGPSGAAAGAYLPPLQQ",
"VFQAPRRPGIGTVGKPIKLLANYFEVDIPK",
"IDVYHY",
"EVDIKPDKCPRRVNREVV",
"EYMVQHFKPQIFGDRKPVYDGKKNIYTVTALPIGNER"
]
###Output
_____no_output_____
###Markdown
### Exercise 2

The code below is intended to specify a function which looks up the capital city of a given country, and call this function on a list of countries. However, it currently has a bug which stops it running. There are three possibilities for the nature of this bug:

1. Its arguments are in the wrong order
2. It uses a variable that is out of scope
3. It is missing the return statement

What is the bug?
###Code
capital_cities = {
"Sweden": "Stockholm",
"UK": "London",
"USA": "Washington DC"
}
def find_capital_city(verbose=True, country):
if country in capital_cities:
capital_city = capital_cities[country]
if verbose:
print("Capital city located")
else:
capital_city = "CAPITAL CITY NOT FOUND"
return(capital_city)
countries = ["USA", "UK", "Sweden", "Belgium"]
for i in countries:
print(find_capital_city(i))
###Output
_____no_output_____ |
notebook_examples/dataframe_latlong.ipynb | ###Markdown
# Geographical location data of Neighborhoods

A dataframe will be created where location data (latitude and longitude) will be provided along with postal code, borough and neighborhood of Canada.
###Code
import pandas as pd
# reading from wikipedia page through pandas
canada_df = pd.read_html('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M')[0]
canada_df.head()
###Output
_____no_output_____
###Markdown
The data has to be cleaned: rows with _Not assigned_ entries in __both__ the 'Neighborhood' and 'Borough' columns have to be removed.
###Code
# cleaning data
# getting rid of rows which have 'Not assigned' value in both 'Borough' and 'Neighborhood' columns
canada_df_filtered = canada_df[(canada_df.Borough != 'Not assigned') & (canada_df.Neighborhood != 'Not assigned')]
# resetting index as rows were dropped
canada_df_filtered = canada_df_filtered.reset_index()
# truncating the old index column
canada_df_filtered = canada_df_filtered.drop(columns=['index'])
canada_df_filtered.head(10)
canada_df = canada_df_filtered
###Output
_____no_output_____
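###Markdown
As a quick added sanity check (not part of the original notebook), we can confirm that no 'Not assigned' entries remain in either column after the filtering above:
###Code
# Hypothetical verification cell: the filter above kept only rows where both
# 'Borough' and 'Neighborhood' are assigned, so this assertion should pass silently.
assert not ((canada_df['Borough'] == 'Not assigned') | (canada_df['Neighborhood'] == 'Not assigned')).any()
###Output
_____no_output_____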
###Markdown
## Reading the location data into a Pandas dataframe

Location data is downloaded from the [link provided by Coursera](https://cocl.us/Geospatial_data). It is in `.csv` format. This will be read into a Pandas dataframe.
###Code
location_data = pd.read_csv('Geospatial_Coordinates.csv')
location_data.head()
location_data.shape
###Output
_____no_output_____
###Markdown
The `location_data` dataframe has exactly the same dimensions as the cleaned neighborhood dataset, `canada_df`. The two dataframes can be merged through an __INNER MERGE__ to form a compact dataframe containing the location data (latitude and longitude) along with the borough and neighborhood names. The merge has to be done __`ON`__ the `'Postal Code'` column.
###Code
neighborhoods_loc = pd.merge(canada_df, location_data, how='inner', on=['Postal Code'])
neighborhoods_loc.head(10)
###Output
_____no_output_____ |
Homework #3.ipynb | ###Markdown
# Data Compression - Homework 3

## Imports and helper functions
###Code
import random
import numpy as np
import matplotlib.pyplot as plt
from math import log
%matplotlib inline
def random_bin():
return "0" if random.randint(1,10)<=8 else "1"
def generate_sequence(length):
return "".join([random_bin() for _ in range(length)])
def bin_to_float(bin_sequence):
res = 0
for ind, bit in enumerate(bin_sequence[2:]):
step = -(ind+1)
if bit == "1":
res += 2**step
return res
def float_to_bin(n):
div = 0.5
res = "0."
while n != 0:
res += str(int(n//div))
n %= div
div /= 2
return res
def arithmetic_encoding(sequence, seperator):
upper = 1
middle = seperator
lower = 0
for bit in sequence:
if bit == "0":
upper = middle
else:
lower = middle
middle = lower + (upper-lower)*seperator
tag = (lower+upper)/2
bin_tag = float_to_bin(tag)
bin_length = 6
tag = bin_to_float(bin_tag[:bin_length])
while tag<lower or upper<tag:
bin_length += 1
tag = bin_to_float(bin_tag[:bin_length])
bin_tag = bin_tag[:bin_length]
return tag, bin_tag
###Output
_____no_output_____
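###Markdown
A small added check (not part of the original homework) of the two binary-fraction helpers. Note the leading "0." prefix in the binary string, which is why 2 is subtracted from the tag length when the compression rate is computed later.
###Code
# Round trip between a dyadic fraction and its binary representation
print(float_to_bin(0.625))    # expected: 0.101 (0.5 + 0.125)
print(bin_to_float("0.101"))  # expected: 0.625
###Output
0.101
0.625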
###Markdown
## Arithmetic Encoding Example
###Code
sequence = generate_sequence(20)
print("random {} bits long binary sequence: {}".format(20, sequence))
tag, bin_tag = arithmetic_encoding(sequence, 0.8)
print("Arithmetic Encoded Tag: {} ({})".format(tag, bin_tag))
###Output
random 20 bits long binary sequence: 00101000100000010000
Arithmetic Encoded Tag: 0.602752685546875 (0.100110100100111)
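###Markdown
The encoding step can be inverted. The decoder below is an added sketch (it is not part of the original homework and assumes the truncated tag falls strictly inside every "0" sub-interval): it mirrors the interval subdivision of arithmetic_encoding to recover the bit sequence from the tag.
###Code
def arithmetic_decoding(tag, length, seperator=0.8):
    # Reproduce the same interval subdivision used by the encoder:
    # the lower sub-interval corresponds to "0", the upper one to "1".
    upper = 1.0
    lower = 0.0
    middle = seperator
    bits = []
    for _ in range(length):
        if tag < middle:
            bits.append("0")
            upper = middle
        else:
            bits.append("1")
            lower = middle
        middle = lower + (upper - lower) * seperator
    return "".join(bits)
# Round-trip check on the sequence encoded above
decoded = arithmetic_decoding(tag, len(sequence))
assert decoded == sequence
###Output
_____no_output_____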
###Markdown
## Plot of the compression rate as a function of sequence length, compared with the entropy
###Code
FROM = 5
TO = 50
SAMPLES = 1000
ENTROPY = -0.8*log(0.8, 2)-0.2*log(0.2, 2)
rates = []
for L in range(FROM, TO+1):
tmp_rates = []
for ind in range(SAMPLES):
sequence = generate_sequence(L)
tag, bin_tag = arithmetic_encoding(sequence, 0.8)
rate = (len(bin_tag)-2)/L
tmp_rates.append(rate)
rates.append(np.mean(tmp_rates))
plt.axis([FROM, TO, 0.5, 2])
plt.plot(range(FROM, TO+1), [ENTROPY]*(TO-FROM+1))
plt.plot(range(FROM, TO+1), rates)
plt.xlabel("Length (L)")
plt.ylabel("Compress Rate (bits/symbol)")
plt.show()
###Output
_____no_output_____ |