Dataset columns: markdown | code | output | license | path | repo_name
Tag selectors: selecting elements
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')

# The printed object includes the enclosing tag
print(soup.title)
print(type(soup.title))  # returns a Tag object
print(soup.head)
print(soup.a)  # only the first match is shown
<title>NTU Mail-臺灣大學電子郵件系統</title> <class 'bs4.element.Tag'> <head> <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/> <title>NTU Mail-臺灣大學電子郵件系統</title> <link href="images/style.css" rel="stylesheet" type="text/css"/> </head> <a href="http://www.ntu.edu.tw/">臺大首頁 NTU Home</a>
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Getting the tag name and its contents
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
print(soup.title.name)    # print the tag name
print(soup.title.string)  # print the text inside the tag
title NTU Mail-臺灣大學電子郵件系統
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Getting attributes
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
# Print the attribute
print(soup.img.attrs['src'])
print(soup.img['src'])
images/mail20.png images/mail20.png
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Nested selection
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
print(soup.head.title.string)  # select the text of the title inside head
NTU Mail-臺灣大學電子郵件系統
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Child and descendant nodes
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')

# Get all child nodes; .contents returns a list
print(soup.head.contents)  # reads out the content of head line by line

# Get all child nodes; .children returns an iterator, not a list
print(soup.head.children)
# i is the index, child is the content
for i, child in enumerate(soup.head.children):
    print(i, child)

# Get all descendant nodes; .descendants returns an iterator, not a list
print(soup.head.descendants)
for i, child in enumerate(soup.head.descendants):
    print(i, child)
<generator object descendants at 0x00000290B724A0A0> 0 1 <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/> 2 3 <title>NTU Mail-臺灣大學電子郵件系統</title> 4 NTU Mail-臺灣大學電子郵件系統 5 6 <link href="images/style.css" rel="stylesheet" type="text/css"/> 7
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Parent and ancestor nodes
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')

# Get the parent node
print(soup.img.parent)

# Get all ancestor nodes; the iterator has to be unpacked
print(list(enumerate(soup.img.parents)))
[(0, <div id="imgcss"><img src="images/mail20.png"/></div>), (1, <div id="mail"> <div id="imgcss"><img src="images/mail20.png"/></div> <div id="content"> <h1><a href="https://mail.ntu.edu.tw/">NTU Mail 2.0</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>教職員帳號 \ Faculty Account</li> <li>公務、計畫、及短期帳號 \ Project and Short Term Account</li> <li>所有在學學生帳號 \ Internal Student Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/">Mail 2.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/">Mail 2.0 FAQ</a></li> </ul> </div><!--content end--> </div>), (2, <div id="wrapper"> <div id="banner"></div> <div id="mail"> <div id="imgcss"><img src="images/mail20.png"/></div> <div id="content"> <h1><a href="https://mail.ntu.edu.tw/">NTU Mail 2.0</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>教職員帳號 \ Faculty Account</li> <li>公務、計畫、及短期帳號 \ Project and Short Term Account</li> <li>所有在學學生帳號 \ Internal Student Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/">Mail 2.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/">Mail 2.0 FAQ</a></li> </ul> </div><!--content end--> </div><!--mail end--> <div id="webmail"> <div id="imgcss"><img src="images/webmail.png"/></div> <div id="content"> <h1><a href="http://webmail.ntu.edu.tw/">NTU Mail 1.0 (Webmail 1.0)</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>校友帳號 \ Alumni Account</li> <li>醫院員工帳號 \ Hospital Staff Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="http://webmail.ntu.edu.tw/">Webmail 1.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://jsc.cc.ntu.edu.tw/ntucc/email/">Webmail FAQ</a></li> </ul> </div><!--content end--> </div><!--webmail end--> </div>), (3, <body> <div id="top">| <a href="http://www.ntu.edu.tw/">臺大首頁 NTU Home</a> | <a href="http://www.cc.ntu.edu.tw/">計中首頁</a> |</div> <div id="wrapper"> <div id="banner"></div> <div id="mail"> <div id="imgcss"><img src="images/mail20.png"/></div> <div id="content"> <h1><a href="https://mail.ntu.edu.tw/">NTU Mail 2.0</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>教職員帳號 \ Faculty Account</li> <li>公務、計畫、及短期帳號 \ Project and Short Term Account</li> <li>所有在學學生帳號 \ Internal Student Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/">Mail 2.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/">Mail 2.0 FAQ</a></li> </ul> </div><!--content end--> </div><!--mail end--> <div id="webmail"> <div id="imgcss"><img src="images/webmail.png"/></div> <div id="content"> <h1><a href="http://webmail.ntu.edu.tw/">NTU Mail 1.0 (Webmail 1.0)</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>校友帳號 \ Alumni Account</li> <li>醫院員工帳號 \ Hospital Staff Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="http://webmail.ntu.edu.tw/">Webmail 1.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://jsc.cc.ntu.edu.tw/ntucc/email/">Webmail FAQ</a></li> </ul> </div><!--content end--> </div><!--webmail end--> </div><!--wrapper end--> <div 
id="footer">Copyright 臺灣大學 National Taiwan University<br/> 諮詢服務電話:(02)3366-5022或3366-5023<br/> 諮詢服務信箱:[email protected]</div> </body>), (4, <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/> <title>NTU Mail-臺灣大學電子郵件系統</title> <link href="images/style.css" rel="stylesheet" type="text/css"/> </head> <body> <div id="top">| <a href="http://www.ntu.edu.tw/">臺大首頁 NTU Home</a> | <a href="http://www.cc.ntu.edu.tw/">計中首頁</a> |</div> <div id="wrapper"> <div id="banner"></div> <div id="mail"> <div id="imgcss"><img src="images/mail20.png"/></div> <div id="content"> <h1><a href="https://mail.ntu.edu.tw/">NTU Mail 2.0</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>教職員帳號 \ Faculty Account</li> <li>公務、計畫、及短期帳號 \ Project and Short Term Account</li> <li>所有在學學生帳號 \ Internal Student Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/">Mail 2.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/">Mail 2.0 FAQ</a></li> </ul> </div><!--content end--> </div><!--mail end--> <div id="webmail"> <div id="imgcss"><img src="images/webmail.png"/></div> <div id="content"> <h1><a href="http://webmail.ntu.edu.tw/">NTU Mail 1.0 (Webmail 1.0)</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>校友帳號 \ Alumni Account</li> <li>醫院員工帳號 \ Hospital Staff Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="http://webmail.ntu.edu.tw/">Webmail 1.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://jsc.cc.ntu.edu.tw/ntucc/email/">Webmail FAQ</a></li> </ul> </div><!--content end--> </div><!--webmail end--> </div><!--wrapper end--> <div id="footer">Copyright 臺灣大學 National Taiwan University<br/> 諮詢服務電話:(02)3366-5022或3366-5023<br/> 諮詢服務信箱:[email protected]</div> </body> </html>), (5, <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/> <title>NTU Mail-臺灣大學電子郵件系統</title> <link href="images/style.css" rel="stylesheet" type="text/css"/> </head> <body> <div id="top">| <a href="http://www.ntu.edu.tw/">臺大首頁 NTU Home</a> | <a href="http://www.cc.ntu.edu.tw/">計中首頁</a> |</div> <div id="wrapper"> <div id="banner"></div> <div id="mail"> <div id="imgcss"><img src="images/mail20.png"/></div> <div id="content"> <h1><a href="https://mail.ntu.edu.tw/">NTU Mail 2.0</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>教職員帳號 \ Faculty Account</li> <li>公務、計畫、及短期帳號 \ Project and Short Term Account</li> <li>所有在學學生帳號 \ Internal Student Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/">Mail 2.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/">Mail 2.0 FAQ</a></li> </ul> </div><!--content end--> </div><!--mail end--> <div id="webmail"> <div id="imgcss"><img src="images/webmail.png"/></div> <div id="content"> <h1><a href="http://webmail.ntu.edu.tw/">NTU Mail 1.0 (Webmail 1.0)</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>校友帳號 \ Alumni Account</li> <li>醫院員工帳號 \ Hospital Staff Account</li> </ol> </li> <li><img align="absmiddle" 
src="images/m02-05-2.gif"/> 立即前往 Go to <a href="http://webmail.ntu.edu.tw/">Webmail 1.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://jsc.cc.ntu.edu.tw/ntucc/email/">Webmail FAQ</a></li> </ul> </div><!--content end--> </div><!--webmail end--> </div><!--wrapper end--> <div id="footer">Copyright 臺灣大學 National Taiwan University<br/> 諮詢服務電話:(02)3366-5022或3366-5023<br/> 諮詢服務信箱:[email protected]</div> </body> </html> )]
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Sibling nodes
# Use requests to fetch the HTML for bs4 to parse
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
# Get the sibling nodes
print(list(enumerate(soup.div.next_siblings)))
print(list(enumerate(soup.div.previous_siblings)))
[(0, '\n'), (1, <div id="wrapper"> <div id="banner"></div> <div id="mail"> <div id="imgcss"><img src="images/mail20.png"/></div> <div id="content"> <h1><a href="https://mail.ntu.edu.tw/">NTU Mail 2.0</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>教職員帳號 \ Faculty Account</li> <li>公務、計畫、及短期帳號 \ Project and Short Term Account</li> <li>所有在學學生帳號 \ Internal Student Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="https://mail.ntu.edu.tw/">Mail 2.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://www.cc.ntu.edu.tw/mail2.0/">Mail 2.0 FAQ</a></li> </ul> </div><!--content end--> </div><!--mail end--> <div id="webmail"> <div id="imgcss"><img src="images/webmail.png"/></div> <div id="content"> <h1><a href="http://webmail.ntu.edu.tw/">NTU Mail 1.0 (Webmail 1.0)</a></h1> <ul> <li><img align="absmiddle" src="images/face01-01.gif"/> 服務對象 <ol> <li>校友帳號 \ Alumni Account</li> <li>醫院員工帳號 \ Hospital Staff Account</li> </ol> </li> <li><img align="absmiddle" src="images/m02-05-2.gif"/> 立即前往 Go to <a href="http://webmail.ntu.edu.tw/">Webmail 1.0</a></li> <li><img align="absmiddle" src="images/ic04-04.gif"/> <a href="http://jsc.cc.ntu.edu.tw/ntucc/email/">Webmail FAQ</a></li> </ul> </div><!--content end--> </div><!--webmail end--> </div>), (2, 'wrapper end'), (3, '\n'), (4, <div id="footer">Copyright 臺灣大學 National Taiwan University<br/> 諮詢服務電話:(02)3366-5022或3366-5023<br/> 諮詢服務信箱:[email protected]</div>), (5, '\n')] [(0, '\n')]
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Standard selectors. find_all(name, attrs, recursive, text, **kwargs) returns all matching elements; the document can be searched by tag name, attribute, or text content. name
import requests
response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
print(soup.find_all('td'))
print(soup.find_all('td')[0])

# Extract the img tags nested under each td tag
for td in soup.find_all('td'):
    print(td.find_all('img'))
[] [] [] [<img src="../img/gifts/img1.jpg"/>] [] [] [] [<img src="../img/gifts/img2.jpg"/>] [] [] [] [<img src="../img/gifts/img3.jpg"/>] [] [] [] [<img src="../img/gifts/img4.jpg"/>] [] [] [] [<img src="../img/gifts/img6.jpg"/>]
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
attrs
import requests
response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
print(soup.find_all(attrs={'id': 'gift1'}))
print(soup.find_all(attrs={'class': 'gift'}))

# Special attributes can be passed directly as keyword arguments
print(soup.find_all(id='gift1'))
print(soup.find_all(class_='gift'))
[<tr class="gift" id="gift1"><td> Vegetable Basket </td><td> This vegetable basket is the perfect gift for your health conscious (or overweight) friends! <span class="excitingNote">Now with super-colorful bell peppers!</span> </td><td> $15.00 </td><td> <img src="../img/gifts/img1.jpg"/> </td></tr>] [<tr class="gift" id="gift1"><td> Vegetable Basket </td><td> This vegetable basket is the perfect gift for your health conscious (or overweight) friends! <span class="excitingNote">Now with super-colorful bell peppers!</span> </td><td> $15.00 </td><td> <img src="../img/gifts/img1.jpg"/> </td></tr>, <tr class="gift" id="gift2"><td> Russian Nesting Dolls </td><td> Hand-painted by trained monkeys, these exquisite dolls are priceless! And by "priceless," we mean "extremely expensive"! <span class="excitingNote">8 entire dolls per set! Octuple the presents!</span> </td><td> $10,000.52 </td><td> <img src="../img/gifts/img2.jpg"/> </td></tr>, <tr class="gift" id="gift3"><td> Fish Painting </td><td> If something seems fishy about this painting, it's because it's a fish! <span class="excitingNote">Also hand-painted by trained monkeys!</span> </td><td> $10,005.00 </td><td> <img src="../img/gifts/img3.jpg"/> </td></tr>, <tr class="gift" id="gift4"><td> Dead Parrot </td><td> This is an ex-parrot! <span class="excitingNote">Or maybe he's only resting?</span> </td><td> $0.50 </td><td> <img src="../img/gifts/img4.jpg"/> </td></tr>, <tr class="gift" id="gift5"><td> Mystery Box </td><td> If you love suprises, this mystery box is for you! Do not place on light-colored surfaces. May cause oil staining. <span class="excitingNote">Keep your friends guessing!</span> </td><td> $1.50 </td><td> <img src="../img/gifts/img6.jpg"/> </td></tr>]
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
text
import requests
response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
print(soup.find_all(text='trained monkeys'))  # finds nothing here; see the note after the output below
[]
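The empty list above is expected behaviour rather than a bug: the text argument of find_all() only matches text nodes whose entire string equals the argument, and "trained monkeys" is only part of a longer sentence on the page. A minimal sketch (not part of the original notebook) showing a substring search with a regular expression plus an exact whole-string match:

import re
import requests
from bs4 import BeautifulSoup

response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'
soup = BeautifulSoup(response.text, 'lxml')

print(soup.find_all(text=re.compile('trained monkeys')))   # substring match via regex
print(soup.find_all(text="Or maybe he's only resting?"))   # exact whole-string match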
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
find(name, attrs, recursive, text, **kwargs) returns only the first matching element
import requests
response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
# Special attributes can be passed directly as keyword arguments
print(soup.find(id='gift1'))
print(soup.find(class_='gift'))
<tr class="gift" id="gift1"><td> Vegetable Basket </td><td> This vegetable basket is the perfect gift for your health conscious (or overweight) friends! <span class="excitingNote">Now with super-colorful bell peppers!</span> </td><td> $15.00 </td><td> <img src="../img/gifts/img1.jpg"/> </td></tr> <tr class="gift" id="gift1"><td> Vegetable Basket </td><td> This vegetable basket is the perfect gift for your health conscious (or overweight) friends! <span class="excitingNote">Now with super-colorful bell peppers!</span> </td><td> $15.00 </td><td> <img src="../img/gifts/img1.jpg"/> </td></tr>
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
find_parents() returns all ancestor nodes and find_parent() returns the direct parent; the same pattern applies to find_next_siblings()/find_next_sibling(), find_previous_siblings()/find_previous_sibling(), find_all_next()/find_next(), and find_all_previous()/find_previous() (a short sketch of these appears after the next output). CSS selectors: select() can be passed a CSS selector directly.
import requests
response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
print(soup.select('.gift'))   # prefix a class with "."
print(soup.select('#gift1'))  # prefix an id with "#"
print(soup.select('tr td'))   # print every td inside a tr

# Print the td elements of each tr
for tr in soup.select('tr'):
    print(tr.select('td'))
[] [<td> Vegetable Basket </td>, <td> This vegetable basket is the perfect gift for your health conscious (or overweight) friends! <span class="excitingNote">Now with super-colorful bell peppers!</span> </td>, <td> $15.00 </td>, <td> <img src="../img/gifts/img1.jpg"/> </td>] [<td> Russian Nesting Dolls </td>, <td> Hand-painted by trained monkeys, these exquisite dolls are priceless! And by "priceless," we mean "extremely expensive"! <span class="excitingNote">8 entire dolls per set! Octuple the presents!</span> </td>, <td> $10,000.52 </td>, <td> <img src="../img/gifts/img2.jpg"/> </td>] [<td> Fish Painting </td>, <td> If something seems fishy about this painting, it's because it's a fish! <span class="excitingNote">Also hand-painted by trained monkeys!</span> </td>, <td> $10,005.00 </td>, <td> <img src="../img/gifts/img3.jpg"/> </td>] [<td> Dead Parrot </td>, <td> This is an ex-parrot! <span class="excitingNote">Or maybe he's only resting?</span> </td>, <td> $0.50 </td>, <td> <img src="../img/gifts/img4.jpg"/> </td>] [<td> Mystery Box </td>, <td> If you love suprises, this mystery box is for you! Do not place on light-colored surfaces. May cause oil staining. <span class="excitingNote">Keep your friends guessing!</span> </td>, <td> $1.50 </td>, <td> <img src="../img/gifts/img6.jpg"/> </td>]
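The notebook does not demonstrate the find_parent()/find_next_sibling() family listed above, so here is a minimal sketch (not part of the original notebook) run against the same gift page:

import requests
from bs4 import BeautifulSoup

response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'
soup = BeautifulSoup(response.text, 'lxml')

img = soup.find('img', src='../img/gifts/img1.jpg')
print(img.find_parent('tr')['id'])                              # 'gift1', the row containing the image
print(len(img.find_parent('tr').find_next_siblings('tr')))      # rows that come after gift1
print(soup.find(id='gift2').find_previous_sibling('tr')['id'])  # 'gift1'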
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Getting attributes
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
for div in soup.select('div'):
    print(div['id'])
    print(div.attrs['id'])
top top wrapper wrapper banner banner mail mail imgcss imgcss content content webmail webmail imgcss imgcss content content footer footer
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Getting text content
import requests
response = requests.get('http://ntumail.cc.ntu.edu.tw')
response.encoding = 'UTF-8'  # set the encoding to avoid garbled Chinese characters
html = response.text

from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'lxml')
for li in soup.select('li'):
    print(li.get_text())
服務對象 教職員帳號 \ Faculty Account 公務、計畫、及短期帳號 \ Project and Short Term Account 所有在學學生帳號 \ Internal Student Account 教職員帳號 \ Faculty Account 公務、計畫、及短期帳號 \ Project and Short Term Account 所有在學學生帳號 \ Internal Student Account 立即前往 Go to Mail 2.0 Mail 2.0 FAQ 服務對象 校友帳號 \ Alumni Account 醫院員工帳號 \ Hospital Staff Account 校友帳號 \ Alumni Account 醫院員工帳號 \ Hospital Staff Account 立即前往 Go to Webmail 1.0 Webmail FAQ
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Summary
The lxml parser is recommended; use html.parser when necessary. Tag selection (soup.tag) is fast but has weak filtering. Prefer find() and find_all() to query for a single match or for all matches. If you are familiar with CSS selectors, use select(). Remember the common ways of getting attrs and text (a short recap sketch follows).
_____no_output_____
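A short recap sketch (not part of the original notebook) that ties the recommendations above together on the gift page used earlier:

import requests
from bs4 import BeautifulSoup

response = requests.get('http://www.pythonscraping.com/pages/page3.html')
response.encoding = 'UTF-8'
soup = BeautifulSoup(response.text, 'lxml')      # lxml parser, as recommended

first_gift = soup.find('tr', attrs={'class': 'gift'})  # single match
all_gifts = soup.find_all('tr', class_='gift')         # all matches
notes = soup.select('span.excitingNote')               # the CSS-selector route

print(first_gift['id'], len(all_gifts))                # attribute access
print(notes[0].get_text())                             # text access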
MIT
BeautifulSoup.ipynb
Pytoddler/Web-scraping
Toggl Reports Downloader: a script to extract data from the Toggl API and create a CSV export of the **latest and complete timelogs**, as well as separate exports of the clients, projects and workspace lists. Useful for backup purposes or additional data analysis.
---- Add Dependencies
import pandas as pd
from datetime import datetime
from dateutil.parser import parse
import time
import pytz

# Toggl Wrapper API
# https://github.com/matthewdowney/TogglPy
import TogglPy
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
---- Authentication
import json

with open("credentials.json", "r") as file:
    credentials = json.load(file)

toggl_cr = credentials['toggl']
APIKEY = toggl_cr['APIKEY']

toggl = TogglPy.Toggl()
toggl.setAPIKey(APIKEY)
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- User Data
user = toggl.request("https://www.toggl.com/api/v8/me") user_id = user['data']['id'] user['data']['fullname'] join_date = parse(user['data']['created_at']) join_date # today = datetime.now() def utcnow(): return datetime.now(tz=pytz.utc) today = utcnow() dates = list(pd.date_range(join_date, today)) print("Days Since Joining: " + str(len(dates))) # days since joining
Days Since Joining: 2058
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- Clients
user_clients = toggl.request("https://www.toggl.com/api/v8/clients") clients = pd.DataFrame() for i in list(range(0, len(user_clients))): clients_df_temp = pd.DataFrame.from_dict(user_clients) clients = pd.concat([clients_df_temp, clients]) clients.to_csv('data/toggl-clients.csv')
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- Workspaces API Ref: https://github.com/toggl/toggl_api_docs/blob/master/chapters/workspaces.md#get-workspaces
workspaces_list = toggl.request("https://www.toggl.com/api/v8/workspaces") len(workspaces_list) workspaces = pd.DataFrame.from_dict(workspaces_list) workspaces_dict = dict(zip(workspaces.id, workspaces.name)) workspaces.to_csv('data/toggl-workspaces.csv')
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
---- Workspace Projects
* API Ref: https://github.com/toggl/toggl_api_docs/blob/master/chapters/workspaces.md#get-workspace-projects
* Endpoint: https://www.toggl.com/api/v8/workspaces/{workspace_id}/projects
projects = pd.DataFrame() for i in list(range(0, len(workspaces_list))): projects_list = toggl.request("https://www.toggl.com/api/v8/workspaces/" + str(workspaces_list[i]['id']) + "/projects") projects_df_temp = pd.DataFrame.from_dict(projects_list) projects = pd.concat([projects_df_temp, projects]) len(projects) # map workspace name onto projects projects['workspace_name'] = projects.wid.map(workspaces_dict) projects.head(3) # total time of active projects projects.actual_hours.sum() projects.to_csv('data/toggl-current-projects.csv')
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
---- Collect Yearly Export of Detailed Timelogs
def get_detailed_reports(wid, since, until): # max 365 days uid = user_id param = { 'workspace_id': wid, 'since': since, 'until': until, 'uid': uid } #print(str(workspace_id) + " " + since) toggl.getDetailedReportCSV(param, "data/detailed/toggl-detailed-report-" + wid + "-" + since + "-" + until + ".csv") # years since joinging last_year = today.year + 1 years = list(range(join_date.year, last_year)) years # list of workspace ids workspace_ids = [] for i in workspaces_list: workspace_ids.append(i['id']) # workspace_ids workspace_ids # Generate Detail CSV Tester workspace_id = "373504" since = "2017-01-01" until = "2017-12-31" get_detailed_reports(workspace_id, since, until) # generate a yearly report for each workspace for i in workspace_ids: wid = str(i) for y in years: try: since = str(y) + "-01-01" # "2013-01-01" until = str(y) + "-12-31" # "2013-12-31" print("Generating CSV... " + "for Workspace: " + str(wid) + " from " + since + " until " + until) get_detailed_reports(wid, since, until) except: print("ERROR On: " + str(uid) + " " + str(wid) + " from " + since + " until " + until)
Generating CSV... for Workspace: 341257 from 2013-01-01 until 2013-12-31 Generating CSV... for Workspace: 341257 from 2014-01-01 until 2014-12-31 Generating CSV... for Workspace: 341257 from 2015-01-01 until 2015-12-31 Generating CSV... for Workspace: 341257 from 2016-01-01 until 2016-12-31 Generating CSV... for Workspace: 341257 from 2017-01-01 until 2017-12-31 Generating CSV... for Workspace: 341257 from 2018-01-01 until 2018-12-31 Generating CSV... for Workspace: 373504 from 2013-01-01 until 2013-12-31 Generating CSV... for Workspace: 373504 from 2014-01-01 until 2014-12-31 Generating CSV... for Workspace: 373504 from 2015-01-01 until 2015-12-31 Generating CSV... for Workspace: 373504 from 2016-01-01 until 2016-12-31 Generating CSV... for Workspace: 373504 from 2017-01-01 until 2017-12-31 Generating CSV... for Workspace: 373504 from 2018-01-01 until 2018-12-31 Generating CSV... for Workspace: 1234339 from 2013-01-01 until 2013-12-31 Generating CSV... for Workspace: 1234339 from 2014-01-01 until 2014-12-31 Generating CSV... for Workspace: 1234339 from 2015-01-01 until 2015-12-31 Generating CSV... for Workspace: 1234339 from 2016-01-01 until 2016-12-31 Generating CSV... for Workspace: 1234339 from 2017-01-01 until 2017-12-31 Generating CSV... for Workspace: 1234339 from 2018-01-01 until 2018-12-31
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- Log of Latest Time Entries for that User
* API Ref: https://github.com/toggl/toggl_api_docs/blob/master/chapters/time_entries.md#get-time-entries-started-in-a-specific-time-range
* Endpoint: https://www.toggl.com/api/v8/time_entries
* Note: start_date and end_date must be ISO 8601 date and time strings.
# latest_time_entries from last 9 days latest_time_entries = toggl.request("https://www.toggl.com/api/v8/time_entries") len(latest_time_entries) latest_time_entries[-1] latest_timelog = pd.DataFrame.from_dict(latest_time_entries) latest_timelog.tail() latest_timelog.head() latest_timelog.to_csv('data/toggl-timelog-latest.csv')
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- BONUS: Extract Time Entries for Every Single Day Using the Toggl API **NOTE:** A bit of a hackish solution, but this is a possible approach to getting individual day logs.
extract_date_start = join_date.strftime("%Y-%m-%d") # join date extract_date_end = today.strftime("%Y-%m-%d") # today # UNCOMMENT TO Overide Full Extract extract_date_start = "2018-05-23" # extract_date_end = "2018-05-01".strftime("%Y-%m-%d") # extract_date_end = today.strftime("%Y-%m-%d") # today # Function that turns datetimes back to strings since that's what the API likes def date_only(datetimeVal): datePart = datetimeVal.strftime("%Y-%m-%d") return datePart # List of Dates of Dates to Extract Time Entries dates_range = list(pd.date_range(extract_date_start, extract_date_end)) dates_list = [date_only(x) for x in dates_range] # Extract Timelogs Between Two Dates and Export to a CSV def toggl_timelog_extractor(input_date1, input_date2): date1 = parse(input_date1).isoformat() + '+00:00' date2 = parse(input_date2).isoformat() + '+00:00' param = { 'start_date': date1, 'end_date': date2, } try: temp_log = pd.DataFrame.from_dict(toggl.request("https://www.toggl.com/api/v8/time_entries", parameters=param)) temp_log.to_csv('data/detailed/toggl-time-entries-' + input_date1 + '.csv') except: # try again if there is an issue the first time temp_log = pd.DataFrame.from_dict(toggl.request("https://www.toggl.com/api/v8/time_entries", parameters=param)) temp_log.to_csv('data/daily-detailed/toggl-time-entries-' + input_date1 + '.csv') # UNCOMMENT to Test Between Two Date # date1 = '2013-07-23' # date2 = '2013-07-24' # toggl_timelog_extractor(date1, date2) # UNCOMMENT TO RUN # Extract All Time Entry Data from Previous Days #for count, item in enumerate(dates_list): # if item != dates_list[-1]: # date1 = item # date2 = (dates_list[count + 1]) # # print(item + " ~ "+ date2) # time.sleep(1) # toggl_timelog_extractor(date1, date2)
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- Simple Data Analysis (Using Exported CSV Logs)
import glob import os # import all days of time entries and create data frame path = 'data/detailed/' allFiles = glob.glob(path + "/*.csv") timelogs = pd.DataFrame() list_ = [] for file_ in allFiles: df = pd.read_csv(file_,index_col=None, header=0) list_.append(df) timelog = pd.concat(list_) timelog.head() len(timelog) # drop unused columns timelog = timelog.drop(['Email', 'User', 'Amount ()', 'Client', 'Billable'], axis=1) # helper functions to convert duration string to seconds def get_sec(time_str): h, m, s = time_str.split(':') return int(h) * 3600 + int(m) * 60 + int(s) # get_sec("01:16:36") def dur2sec(row): return get_sec(row['Duration']) # timelog.apply(dur2sec, axis=1) timelog['seconds'] = timelog.apply(dur2sec, axis=1) timelog.info() timelog.describe() timelog.head() timelog.tail() # Total hours round((timelog.seconds.sum() / 60 / 60), 1) # total days round((timelog.seconds.sum() / 60 / 60 / 24), 1) timelog.to_csv("data/toggl-detailed-logs-full-export.csv")
_____no_output_____
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
----- Combine to a Daily Project Time Number
# combine to daily number daily_project_time = timelog.groupby(['Start date'])['seconds'].sum() print('{:,} total project time data'.format(len(daily_project_time))) daily_project_time.to_csv('data/daily_project_time.csv') daily_project_time.tail(5)
1,924 total project time data
MIT
toggl/toggl_downloader.ipynb
Zackhardtoname/qs_ledger
Market Basket Analysis Introduction
Attribution: Chris Moffitt at http://pbpython.com/
Association analysis is generally considered to belong to the unsupervised learning methods and can, for example, be used to find common patterns in large amounts of transaction data. One application area is therefore so-called *market basket analysis*. There are several different algorithms that can be used for this; one of the most common is called apriori. See more about market basket analysis, for example, [here](https://www.youtube.com/watch?v=guVvtZ7ZClw), or read a very short introduction [here](https://analyticsindiamag.com/hands-on-guide-to-market-basket-analysis-with-python-codes/). In short, it is about finding out what different product-related purchase patterns look like: something along the lines of, if my customers buy hairspray, how likely is it that they also buy shampoo? What can we use this for? Ultimately it can be useful, for example, for giving recommendations to customers, similar to what [Amazon](https://www.amazon.se/Data-Science-John-Kelleher/dp/0262535432/ref=sr_1_1?dchild=1&keywords=data+science&qid=1614593355&sr=8-1) does. Also see the lecture on unsupervised learning, as well as [mlxtend's documentation](http://rasbt.github.io/mlxtend/).
import pandas as pd
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules

df = pd.read_excel('http://archive.ics.uci.edu/ml/machine-learning-databases/00352/Online%20Retail.xlsx')
df.head()
_____no_output_____
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
As usual, we start by getting to know the data we have: what kind of data is it? After that we need (as always) to clean our data and make sure its format suits the type of analysis we are going to carry out.
# Clean up whitespace and drop rows that do not have a valid invoice number
df['Description'] = df['Description'].str.strip()
df.dropna(axis=0, subset=['InvoiceNo'], inplace=True)

# Remove invoices that come from credit transactions
df['InvoiceNo'] = df['InvoiceNo'].astype('str')
df = df[~df['InvoiceNo'].str.contains('C')]
_____no_output_____
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
To be able to run our algorithms, we also need to reshape the data so that each row represents a transaction and each product has its own column.
# We start by analysing only purchases made in France, so the data set does not get too large
basket = (df[df['Country'] == "France"]
          .groupby(['InvoiceNo', 'Description'])['Quantity']
          .sum().unstack().reset_index().fillna(0)
          .set_index('InvoiceNo'))
_____no_output_____
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
This is what our dataset looks like once it has been reshaped the way we want it for the association analysis.
basket.head()
_____no_output_____
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
How many products does the company sell in France?
# Look at a few of the columns; what are we seeing?
basket.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7]].head()
_____no_output_____
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
We also need to recode the data with `one-hot encoding`, so that a product bought in a given transaction is represented by 1 and the absence of a specific product in a transaction is represented by 0. This makes our dataset very sparse; why? **Note!** One-hot encoding can be done in several ways (a vectorized alternative is sketched right after the code below).
# Convert to 1 if the product was bought and 0 if it was not
def encode_units(x):
    if x <= 0:
        return 0
    if x >= 1:
        return 1

basket_sets = basket.applymap(encode_units)

# Remove data we do not need
basket_sets.drop('POSTAGE', inplace=True, axis=1)
basket_sets.head()
_____no_output_____
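As the note above says, one-hot encoding can be done in several ways; a minimal vectorized alternative to encode_units (not part of the original notebook) is a simple comparison, which gives the same 0/1 matrix for integer quantities:

basket_sets_alt = (basket > 0).astype(int)
basket_sets_alt.drop('POSTAGE', inplace=True, axis=1)
# spot check: for integer quantities this should agree with the applymap version
# (basket_sets_alt == basket_sets).all().all()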
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
Measuring association rules

Determining which association rules are valuable requires a lot of domain knowledge. There are, however, also some metrics that can be used to help judge the quality of the rules and to decide how much weight we should give a specific rule. There are three main ways of measuring association rules:

**Support**

Support is the share of transactions that contain a specified set of products. The more often these products occur together (here: are bought together), the greater the support.

If the transaction data looks like this:

```
t1: Beef, Carrot, Milk
t2: Steak, Cheese
t3: Cheese, Cereal
t4: Steak, Carrot, Cheese
t5: Steak, Carrot, Butter, Cheese, Milk
t6: Carrot, Butter, Milk
t7: Carrot, Milk, Butter
```

then the support for the combination carrot, butter and milk being bought together is:

$$Support(Carrot \land Butter \land Milk) = \frac{3}{7} = 0.43$$

because a combination of these three products occurs in 3 of the 7 possible transactions.

**Confidence**

Confidence means that if we have a rule such as $Beef, Chicken \rightarrow Apple$ with a confidence of 33%, then whenever there is beef and chicken in someone's shopping cart there is a 33% chance that it also contains apples. Confidence is calculated, for example, like this. Given the rule $Butter \rightarrow Milk, Chicken$:

$$Confidence(Butter \rightarrow Milk, Chicken) = \frac{Support(Butter \land Milk \land Chicken)}{Support(Butter)}$$

**Lift**

Lift gives us a measure of how good a rule is that also accounts for how common the right-hand side of the rule (the $Consequent$) is. Rules whose $Consequent$ is a very common product tell us nothing of value, so it is not meaningful to have milk, a very common product, on the right-hand side of a rule. The rule of thumb for lift is: if lift is $>1$, the rule is better than guessing; if lift is $\leq 1$, the rule is roughly equivalent to a pure guess.

Example:

$$Lift(Chicken \rightarrow Milk) = \frac{Support(Chicken \land Milk)}{Support(Chicken) \times Support(Milk)} = \frac{(4 / 7)}{(5 / 7) \times (4 / 7)} = 1.4$$

This implies that $Chicken \rightarrow Milk$ could be a good rule, since $1.4 > 1$. If we instead change the support for how often milk is bought to $6/7$, the result changes:

$$Lift(Chicken \rightarrow Milk) = \frac{Support(Chicken \land Milk)}{Support(Chicken) \times Support(Milk)} = \frac{(4 / 7)}{(5 / 7) \times (6 / 7)} = 0.933$$

Now the same rule, $Chicken \rightarrow Milk$, no longer looks like a good rule, since $0.933 < 1$.

There are also several more metrics, such as **Leverage** and **Conviction**, that can be used to decide which patterns are worth a closer look, but we will not cover them in this course. If you are interested, you can read more [here](https://www.diva-portal.org/smash/get/diva2:956424/FULLTEXT01.pdf), [here](https://michael.hahsler.net/research/recommender/associationrules.html) and [here](https://paginas.fe.up.pt/~ec/files_0506/slides/04_AssociationRules.pdf).
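A small worked check (not part of the original notebook) of the formulas above, using the toy transactions t1-t7 and the example numbers given in the text:

transactions = [
    {'Beef', 'Carrot', 'Milk'},
    {'Steak', 'Cheese'},
    {'Cheese', 'Cereal'},
    {'Steak', 'Carrot', 'Cheese'},
    {'Steak', 'Carrot', 'Butter', 'Cheese', 'Milk'},
    {'Carrot', 'Butter', 'Milk'},
    {'Carrot', 'Milk', 'Butter'},
]

def support(itemset):
    # share of transactions that contain every item in itemset
    return sum(set(itemset) <= t for t in transactions) / len(transactions)

print(round(support({'Carrot', 'Butter', 'Milk'}), 2))   # 0.43, i.e. 3/7

# lift arithmetic from the two Chicken -> Milk examples in the text
print(round((4/7) / ((5/7) * (4/7)), 2))    # 1.4  -> better than guessing
print(round((4/7) / ((5/7) * (6/7)), 3))    # 0.933 -> no better than a guess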
**According to the mlxtend library, the different measures are computed as follows:**
- support(A->C) = support(A+C) [aka 'support'], range: [0, 1]
- confidence(A->C) = support(A+C) / support(A), range: [0, 1]
- lift(A->C) = confidence(A->C) / support(C), range: [0, inf]
- leverage(A->C) = support(A->C) - support(A)*support(C), range: [-1, 1]
- conviction = [1 - support(C)] / [1 - confidence(A->C)], range: [0, inf]

Association analysis

Start by building our `frequent itemset` with the `apriori` algorithm. What do we consider the threshold for a product to count as occurring frequently?
frequent_itemsets = apriori(basket_sets, min_support=0.07, use_colnames=True)
frequent_itemsets.head()

# Create the rules themselves; the different metrics are computed at the same time
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1)
rules

# Count the number of antecedents for each rule
rules["num_antecedents"] = rules["antecedents"].apply(lambda x: len(x))
rules

grocery_rules_3_items = rules[rules.num_antecedents >= 2]
grocery_rules_3_items

# Stricter rules
rules[(rules['lift'] >= 6) & (rules['confidence'] >= 0.8)]

basket['ALARM CLOCK BAKELIKE GREEN'].sum()
basket['ALARM CLOCK BAKELIKE RED'].sum()

# We also look at transactions from Germany as a comparison
basket2 = (df[df['Country'] == "Germany"]
           .groupby(['InvoiceNo', 'Description'])['Quantity']
           .sum().unstack().reset_index().fillna(0)
           .set_index('InvoiceNo'))

# Encoding
basket_sets2 = basket2.applymap(encode_units)
basket_sets2.drop('POSTAGE', inplace=True, axis=1)

# Frequent itemsets
frequent_itemsets2 = apriori(basket_sets2, min_support=0.05, use_colnames=True)

# Rules
rules2 = association_rules(frequent_itemsets2, metric="lift", min_threshold=1)
rules2

rules2["num_antecedents"] = rules2["antecedents"].apply(lambda x: len(x))
rules2

grocery_rules_3_items = rules2[rules2.num_antecedents >= 2]
grocery_rules_3_items

# Stricter rules
rules2[(rules2['lift'] >= 4) & (rules2['confidence'] >= 0.5)]
_____no_output_____
Apache-2.0
Market_Basket_Intro.ipynb
UU-IM-EU/Code_along4
Load an image from a URL
from geodatatool import visual

visual.load_image_from_url("https://upload.wikimedia.org/wikipedia/commons/6/61/Remote_Sensing_Illustration.jpg")
_____no_output_____
MIT
docs/notebooks/visual_intro.ipynb
clancygeodata/geodatatool
Display a YouTube video
from geodatatool import visual

visual.display_youtube("Ezn1ne2Fj6Y")
_____no_output_____
MIT
docs/notebooks/visual_intro.ipynb
clancygeodata/geodatatool
**Day 3 - Task 1**: Plot the boxplot (column imdb_score) of the color and black-and-white films
imdb_has_attr_color = imdb.dropna(subset=['color']) sns.boxplot(data = imdb_has_attr_color, x ="color", y="imdb_score") plt.gcf().set_size_inches(3, 6)
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
**Day 3 - Task 2**: In the graph (budget x gross), we have a point with a high gross value (close to 2.5) and also a high loss, find this movie
imdb = imdb.drop_duplicates() imdb_usa = imdb.query("country == 'USA'") sns.scatterplot(x="budget", y="gross", data = imdb_usa) imdb_usa.query('budget > 250000000 & gross < 100000000')['movie_title']
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
**Day 3 - Task 4**: What are the films from the years just before WWII (late 1930s) that have high earnings?
imdb_usa['earnings'] = imdb_usa['gross'] - imdb_usa['budget'] sns.scatterplot(x="title_year", y="earnings", data = imdb_usa) imdb_usa.query('title_year > 1935 & title_year < 1940 & earnings > 150000000')[['movie_title', 'title_year', 'gross']]
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
**Day 3 - Task 5**: In the graph (movies_per_director x gross), we have some strange points between 15 and 20. Confirm Paulo's theory that the director is Woody Allen
movies_director = imdb_usa.groupby('director_name')['director_name'].count().rename('movies_director') gross_director_movies = imdb_usa[['director_name', 'gross', 'movie_title']].merge(movies_director, on='director_name') sns.scatterplot(x="movies_director", y="gross", data = gross_director_movies) gross_director_movies.query('movies_director == 18').sort_values('gross').head() movies_director.sort_values()
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
**Day 3 - Task 7**: Calculate the correlation of films only after the 2000s
imdb_usa_af2000 = imdb_usa.query('title_year > 2000') imdb_usa_af2000[["gross", "budget", "earnings", "title_year"]].corr()
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
**Day 3 - Task 8**: Try to find a graph that looks like a line
sns.lineplot(data = imdb_usa.query('title_year > 2005').groupby('title_year')['gross'].mean())
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
**Day 3 - Task 9**: Show the correlation between other variables present in the dataframe. Counting revisions per year can also be a resource.
imdb_usa[["num_user_for_reviews", "num_voted_users"]].corr() imdb_usa[["actor_1_facebook_likes", "cast_total_facebook_likes"]].corr() sns.lineplot(data = imdb_usa.groupby('title_year')['num_voted_users'].sum())
_____no_output_____
MIT
ImdbTasks.ipynb
cardosorrenan/alura-QuarentenaDados
Most examples work across multiple plotting backends. This example is also available for:
* [Matplotlib Directed Airline Routes](../matplotlib/directed_airline_routes.ipynb)
import networkx as nx
import holoviews as hv
from holoviews import opts
from holoviews.element.graphs import layout_nodes
from bokeh.sampledata.airport_routes import routes, airports

hv.extension('bokeh')
_____no_output_____
BSD-3-Clause
examples/gallery/demos/bokeh/directed_airline_routes.ipynb
ppwadhwa/holoviews
Declare data
# Create dataset indexed by AirportID and with additional value dimension
airports = hv.Dataset(airports, ['AirportID'], ['Name', 'IATA', 'City'])
label = 'Alaska Airline Routes'

# Select just Alaska Airline routes
as_graph = hv.Graph((routes[routes.Airline=='AS'], airports),
                    ['SourceID', "DestinationID"], 'Airline', label=label)

as_graph = layout_nodes(as_graph, layout=nx.layout.fruchterman_reingold_layout)
labels = hv.Labels(as_graph.nodes, ['x', 'y'], ['IATA', 'City'], label=label)
_____no_output_____
BSD-3-Clause
examples/gallery/demos/bokeh/directed_airline_routes.ipynb
ppwadhwa/holoviews
Plot
(as_graph * labels).opts(
    opts.Graph(directed=True, node_size=8, bgcolor='gray',
               xaxis=None, yaxis=None, edge_line_color='white',
               edge_line_width=1, width=800, height=800, arrowhead_length=0.01,
               node_fill_color='white', node_nonselection_fill_color='black'),
    opts.Labels(xoffset=-0.04, yoffset=0.03, text_font_size='10pt'))
_____no_output_____
BSD-3-Clause
examples/gallery/demos/bokeh/directed_airline_routes.ipynb
ppwadhwa/holoviews
Starbucks Capstone Challenge IntroductionThis data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks. Not all users receive the same offer, and that is the challenge to solve with this data set.Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer. Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer. ExampleTo give an example, a user could receive a discount offer buy 10 dollars get 2 off on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer. CleaningThis makes data cleaning especially important and tricky.You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers. Final AdviceBecause this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. 
You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A).

Data Sets

The data is contained in three files:

* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)
* profile.json - demographic data for each customer
* transcript.json - records for transactions, offers received, offers viewed, and offers completed

Here is the schema and explanation of each variable in the files:

**portfolio.json**
* id (string) - offer id
* offer_type (string) - type of offer ie BOGO, discount, informational
* difficulty (int) - minimum required spend to complete an offer
* reward (int) - reward given for completing an offer
* duration (int) - time for offer to be open, in days
* channels (list of strings)

**profile.json**
* age (int) - age of the customer
* became_member_on (int) - date when customer created an app account
* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
* id (str) - customer id
* income (float) - customer's income

**transcript.json**
* event (str) - record description (ie transaction, offer received, offer viewed, etc.)
* person (str) - customer id
* time (int) - time in hours since start of test. The data begins at time t=0
* value - (dict of strings) - either an offer id or transaction amount depending on the record

**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the terminal from the orange icon in the top left of this notebook. When you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors.
import pandas as pd
import numpy as np
import math
import json
%matplotlib inline

# read in the json files
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = pd.read_json('data/profile.json', orient='records', lines=True)
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
_____no_output_____
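A minimal sketch (not from the notebook) of the caveat described in the introduction: an "offer completed" record only reflects real influence if the same person viewed that offer at or before the completion time. The exact event labels and the 'offer id'/'offer_id' keys inside the value dict are assumptions based on the schema description above and the cleaning step later on.

offers = transcript.copy()
offers['offer_id'] = offers['value'].apply(lambda v: v.get('offer id') or v.get('offer_id'))

views = offers[offers['event'] == 'offer viewed'][['person', 'offer_id', 'time']]
completions = offers[offers['event'] == 'offer completed'][['person', 'offer_id', 'time']]

merged = completions.merge(views, on=['person', 'offer_id'],
                           how='left', suffixes=('_done', '_viewed'))
merged['viewed_first'] = merged['time_viewed'].notna() & (merged['time_viewed'] <= merged['time_done'])

# one row per completion: was the offer seen at or before it was completed?
influenced = merged.groupby(['person', 'offer_id', 'time_done'])['viewed_first'].any()
print(influenced.mean())  # share of completions that were plausibly influenced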
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Reading The Datasets
portfolio.head(10) portfolio.shape[0] portfolio.shape[1] print('portfolio: rows = {} ,columns = {}'.format((portfolio.shape[0]),(portfolio.shape[1]))) portfolio.describe() portfolio.info() portfolio.offer_type.value_counts() portfolio.reward.value_counts() import matplotlib.pyplot as plt plt.figure(figsize=[6,6]) fig, ax = plt.subplots() y_counts = portfolio['offer_type'].value_counts() y_counts.plot(kind='barh').invert_yaxis() for i, v in enumerate(y_counts): ax.text(v, i, str(v), fontsize=14) plt.title('Different offer types')
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Discount and BOGO offers are issued equally often and are the most common offer types.
plt.figure(figsize=[6,6]) fig, ax = plt.subplots() y_counts = portfolio['duration'].value_counts() y_counts.plot(kind='barh').invert_yaxis() for i, v in enumerate(y_counts): ax.text(v, i, str(v), color='black', fontsize=14) plt.title('Different offer types\' duartion')
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Here we can see that most offers have a duration of 7 days.
Profile
profile.head(8) print('profile: rows = {} ,columns = {}'.format((profile.shape[0]),(profile.shape[1]))) profile.describe() profile.isnull().sum() profile.shape import seaborn as sns plt.figure(figsize=[6,6]) fig, ax = plt.subplots() y_counts = profile['gender'].value_counts() y_counts.plot(kind='barh').invert_yaxis() for i, v in enumerate(y_counts): ax.text(v, i, str(v), color='black', fontsize=14) plt.title('Count of Genders') plt.pie(profile['gender'].value_counts() , labels = ['Male' , 'Female' , 'Other'])
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Most of the customers interested in the offers are male; they make up the largest group.
Transcript
transcript.head(9)
transcript.describe()
transcript.info()
# note: this print mistakenly uses profile.shape, so it reports the profile
# dimensions rather than the transcript's
print('transcript: rows = {} ,columns = {}'.format((profile.shape[0]),(profile.shape[1])))
transcript: rows = 17000 ,columns = 5
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Cleaning The Datasets
Portfolio
Renaming 'id' to 'offer_id'
portfolio.columns = ['channels', 'difficulty', 'duration', 'offer_id', 'offer_type', 'reward'] portfolio.columns portfolio.head()
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Profile
Renaming 'id' to 'customer_id', filling the missing values of age and income with the mean, and filling the missing values of gender with the mode.
profile.columns
profile.columns = ['age', 'became_member_on', 'gender', 'customer_id', 'income']
profile.columns

# fill missing values (assign back, otherwise fillna has no effect)
profile['age'] = profile['age'].fillna(profile['age'].mean())               # average age
profile['income'] = profile['income'].fillna(profile['income'].mean())      # average income
profile['gender'] = profile['gender'].fillna(profile['gender'].mode()[0])   # most common gender

profile.head()
profile.isnull().sum()
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
So there are no missing values remaining in the profile dataframe.
Transcript
Renaming 'person' to 'customer_id', splitting the 'value' column based on its keys, and dropping the unnecessary columns.
transcript.columns transcript.columns = ['event', 'customer_id', 'time', 'value'] #changing the column name transcript.head() transcript.value.astype('str').value_counts().to_dict() #converting the values in the column 'value' to dictionary transcript['offer_id'] = transcript.value.apply(lambda x: x.get('offer_id')) #splitting the 'value' into separate columns.here is 'offer_id' transcript['offer id'] = transcript.value.apply(lambda x: x.get('offer id')) #splitting the 'value' into separate columns.here is 'offer id' transcript['offer_id'] = transcript.apply(lambda x : x['offer id'] if x['offer_id'] == None else x['offer_id'], axis=1) #merging both 'offer id' and 'offer_id' into the same column 'offer_id' transcript.drop('offer id',axis = 1,inplace = True) transcript.head(10) #splitting the reward and amount values in the 'value' transcript['offer_reward'] = transcript['value'].apply(lambda x: x.get('reward')) transcript['amount'] = transcript['value'].apply(lambda x: x.get('amount')) transcript.drop('value' ,inplace = True , axis = 1) transcript.isnull().sum() transcript.fillna(0 , inplace = True) #filling the missing values with 0 transcript.head(10)
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Exploratory Data Analysis
Now we will merge the dataframes.
merge_df = pd.merge(portfolio, transcript, on='offer_id')#merging portfolio and transcript dataframes on the basis of 'offer_id' final_df = pd.merge(merge_df, profile, on='customer_id')#merging the merged dataframe of portfolio and transcript with profile dataframe on the basis of 'customer-id' #Exploring the final merged dataframe final_df
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Now we will see the different offer types and their counts
final_df['offer_type'].value_counts().plot.barh(title = 'Offer types with their counts')
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
So, we can see that discount and BOGO are the most frequently given offer types.
Now we will see the different events and their counts.
final_df['event'].value_counts().plot.barh(title = 'Different events and their counts')
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
So, in most cases an offer is received by the user but not completed, which means most people simply ignore the offers they receive.
Now we will analyse this data on the basis of the age of the customers.
sns.distplot(final_df['age'] , bins = 50 , hist_kws = {'alpha' : 0.4});
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
As we can see, the records with an age above 100 act as outliers, so we will remove them
final_df = final_df[final_df['age']<=100] # Now seeing the distribution plot of age after removing the outliers sns.distplot(final_df['age'] , bins = 50 , hist_kws = {'alpha' : 0.4});
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
We can observe that customers in the 45-60 age group are the most frequent, more than any other group, which is quite interesting. Now we will analyse the data by the income of the customers
sns.distplot(final_df['income'] , bins = 50 , hist_kws = {'alpha' : 0.4}); final_df['income'].mean()
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Now we can see that most Starbucks customers have an income in the range of 55k - 75k, with a mean income of 66413.35. Next we will see how the final dataframe depends on the 'gender' feature
final_df['gender'].value_counts().plot.barh(title = 'Analysing the gender of customers')
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
So, we can see that most of the customers are male. We will now analyse the 'offer_type' distribution by gender
sns.countplot(x = 'offer_type' , hue = 'gender' , data = final_df)
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
We can see that the counts for male and female customers are approximately equal for the BOGO and discount offers. Now we will look at the relation between gender and events
sns.countplot(x = 'event' , hue = 'gender' , data = final_df)
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
So, from the exploratory data analysis we can see that: most customers only receive offers and do not view them, and the share who complete the offers they receive is quite small; most of the offers made by Starbucks are BOGO and discount; most customers fall in the 45-60 age group; the most common gender is male; and most customers have an income in the range of 55k - 75k. Making a Machine Learning Model. First, let's look at our final dataset
final_df
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
We will now encode the categorical features 'offer_type', 'gender', and 'age', and map 'offer_id' and 'customer_id' to integer codes
final_df = pd.get_dummies(final_df , columns = ['offer_type' , 'gender' , 'age']) #processing offer_id offer_id = final_df['offer_id'].unique().tolist() offer_map = dict( zip(offer_id,range(len(offer_id))) ) final_df.replace({'offer_id': offer_map},inplace=True) #processing customer_id customer_id = final_df['customer_id'].unique().tolist() customer_map = dict( zip(customer_id,range(len(customer_id))) ) final_df.replace({'customer_id': customer_map},inplace=True) final_df.head()
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Now we will scale the numerical columns, including 'income', 'difficulty', 'duration', and several more
from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() numerical_columns = ['income' , 'difficulty' , 'duration' , 'offer_reward' , 'time' , 'reward' , 'amount'] final_df[numerical_columns] = scaler.fit_transform(final_df[numerical_columns]) final_df.head()
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
We will encode the values in the 'event' column
final_df['event'] = final_df['event'].map({'offer received':1, 'offer viewed':2, 'offer completed':3}) final_df2 = final_df.drop('event' , axis = 1)
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Now encoding the channels column
final_df2['web'] = final_df2['channels'].apply(lambda x : 1 if 'web' in x else 0) final_df2['mobile'] = final_df2['channels'].apply(lambda x : 1 if 'mobile' in x else 0) final_df2['social'] = final_df2['channels'].apply(lambda x : 1 if 'social' in x else 0) final_df2['email'] = final_df2['channels'].apply(lambda x : 1 if 'email' in x else 0) #Now dropping the Channels column final_df2.drop('channels' , axis = 1 , inplace = True) final_df2['became_member_on'] = final_df2['became_member_on'].apply(lambda x: pd.to_datetime(str(x), format='%Y%m%d')) #adding new columns for month & year final_df2['month_member'] = final_df2['became_member_on'].apply(lambda x: x.month) final_df2['year_member'] = final_df2['became_member_on'].apply(lambda x: x.year) #dropping the became_member_on column final_df2.drop('became_member_on',axis=1, inplace=True) final_df2.shape
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Training Our Dataset. Now we split our features and the 'event' target into training and test sets
independent_variables = final_df2 #our dataset containing all the independent variables excluding the 'event' dependent_variable = final_df['event'] #our final dataset containing the 'event' from sklearn.model_selection import train_test_split # splitting our dataset into training and test set and the test set being the 30% of the total dataset x_train , x_test, y_train , y_test = train_test_split(independent_variables , dependent_variable , test_size = 0.3 , random_state = 1) x_train.shape x_test.shape
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
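A caveat on the preprocessing above (my own note): the MinMaxScaler was fit on the full dataset before the split, which leaks information from the test rows into the scaling. A leakage-free sketch, assuming `numerical_columns` from the earlier cell and that on a rerun the full-frame scaling step is skipped:
from sklearn.preprocessing import MinMaxScaler

leak_free_scaler = MinMaxScaler().fit(x_train[numerical_columns])   # fit on the training rows only
x_train_scaled = x_train.copy()
x_test_scaled = x_test.copy()
x_train_scaled[numerical_columns] = leak_free_scaler.transform(x_train[numerical_columns])
x_test_scaled[numerical_columns] = leak_free_scaler.transform(x_test[numerical_columns])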
Testing Our Dataset
# We will implement a number of classification machine learning methods and determine which method works best for our model from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier #We will test the quality of the predicted output on a couple of metrics, i.e. accuracy score and F1 score #We will use the F1 score because it handles class imbalance better than the accuracy score and is the most suitable metric for this model from sklearn.metrics import confusion_matrix , accuracy_score , fbeta_score def train_test_f1(model): """ Returns the F1 score of training and test set of any particular model model : model name Returns f1_score_train : F1 score of training set f1_score_test : F1 score of test set """ predict_train = (model.fit(x_train , y_train)).predict(x_train) predict_test = (model.fit(x_train , y_train)).predict(x_test) f1_score_train = fbeta_score(y_train , predict_train , beta = 1 , average = 'micro')*100 #beta = 1 gives the F1 score, matching the docstring f1_score_test = fbeta_score(y_test , predict_test , beta = 1 , average = 'micro')*100 return f1_score_train , f1_score_test
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
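A small optional refactor (mine, not from the notebook): `train_test_f1` fits the model twice, once per `.predict` call. Fitting once gives the same scores and roughly halves the training time:
def train_test_f1_single_fit(model):
    """Same scores as train_test_f1, but the model is fit only once."""
    model.fit(x_train, y_train)
    f1_train = fbeta_score(y_train, model.predict(x_train), beta=1, average='micro') * 100
    f1_test = fbeta_score(y_test, model.predict(x_test), beta=1, average='micro') * 100
    return f1_train, f1_test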
Implementing the KNN Model
knn = KNeighborsClassifier() f1_score_train_knn , f1_score_test_knn = train_test_f1(knn)#calculating the F1 scores
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Implementing the Logistic Regression
logistic = LogisticRegression() f1_score_train_logistic , f1_score_test_logistic = train_test_f1(logistic)#calculating the F1 scores
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Implementing the Random Forest Classifier
random_forest = RandomForestClassifier() f1_score_train_random , f1_score_test_random = train_test_f1(random_forest)#calculating the F1 scores
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Implementing the Decision Tree Classifier
decision_tree = DecisionTreeClassifier() f1_score_train_decision , f1_score_test_decision = train_test_f1(decision_tree)#calculating the F1 scores
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
Concluding from the above models and scores
f1_scores_models = {'model_name' : [knn.__class__.__name__ , logistic.__class__.__name__ , random_forest.__class__.__name__ , decision_tree.__class__.__name__] , 'Training set F1 Score' : [f1_score_train_knn , f1_score_train_logistic , f1_score_train_random , f1_score_train_decision], 'Test set F1 Score' : [f1_score_test_knn , f1_score_test_logistic , f1_score_test_random , f1_score_test_decision]} f1_scores_df = pd.DataFrame(f1_scores_models) f1_scores_df
_____no_output_____
MIT
Capstone/Starbucks_Capstone_notebook.ipynb
mahajan-abhay/Nanodegree
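Beyond a single aggregate score, a per-class breakdown on the held-out set shows how each model handles the minority classes. A sketch (my addition) using the random forest purely as an illustration — any of the fitted models could be substituted:
from sklearn.metrics import classification_report

illustrative_model = RandomForestClassifier().fit(x_train, y_train)   # refit for a clean evaluation
print(classification_report(y_test, illustrative_model.predict(x_test)))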
Unit Commitment. Keywords: semi-continuous variables, cbc usage, gdp, disjunctive programming. Imports
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd from IPython.display import display, HTML import shutil import sys import os.path if not shutil.which("pyomo"): !pip install -q pyomo assert(shutil.which("pyomo")) if not (shutil.which("cbc") or os.path.isfile("cbc")): if "google.colab" in sys.modules: !apt-get install -y -qq coinor-cbc else: try: !conda install -c conda-forge coincbc except: pass assert(shutil.which("cbc") or os.path.isfile("cbc")) import pyomo.environ as pyo import pyomo.gdp as gdp
_____no_output_____
MIT
_build/html/_sources/notebooks/03/03.06-Unit-Commitment.ipynb
leonlan/MO-book
Problem statement. A set of $N$ electrical generating units are available to meet a required demand $d_t$ for time period $t \in 1, 2, \ldots, T$. The power generated by unit $n$ for time period $t$ is denoted $x_{n,t}$. Each generating unit is either off, $x_{n,t} = 0$, or operating in a range $[p_n^{min}, p_n^{max}]$. The incremental cost of operating the generator during period $t$ is $a_n x_{n,t} + b_n$. A binary variable $u_{n,t}$ indicates the operational state of a generating unit. The unit commitment problem is then\begin{align*}\min \sum_{n\in N} \sum_{t\in T} a_n x_{n,t} + b_n u_{n,t}\end{align*}subject to\begin{align*}\sum_{n\in N} x_{n,t} & = d_t \qquad \forall t \in T \\p_{n}^{min}u_{n,t} & \leq x_{n,t} \qquad \forall n \in N, \ \forall t \in T \\p_{n}^{max}u_{n,t} & \geq x_{n,t} \qquad \forall n \in N, \ \forall t \in T \\\end{align*}where we use the short-cut notation $T = [1, 2, \ldots, T]$ and $N = [1, 2, \ldots, N]$. This is a minimal model. A realistic model would include additional constraints corresponding to minimum up and down times for generating units, limits on the rate at which power levels can change, maintenance periods, and so forth.* Sun, Xiaoling, Xiaojin Zheng, and Duan Li. ["Recent advances in mathematical programming with semi-continuous variables and cardinality constraint."](https://link.springer.com/article/10.1007/s40305-013-0004-0) Journal of the Operations Research Society of China 1, no. 1 (2013): 55-77. Model Demand
# demand T = 20 T = np.array([t for t in range(0, T)]) d = np.array([100 + 100*np.random.uniform() for t in T]) fig, ax = plt.subplots(1,1) ax.bar(T+1, d) ax.set_xlabel('Time Period') ax.set_title('Demand')
_____no_output_____
MIT
_build/html/_sources/notebooks/03/03.06-Unit-Commitment.ipynb
leonlan/MO-book
Generating Units
# generating units N = 5 pmax = 2*max(d)/N pmin = 0.6*pmax N = np.array([n for n in range(0, N)]) a = np.array([0.5 + 0.2*np.random.randn() for n in N]) b = np.array([10*np.random.uniform() for n in N]) p = np.linspace(pmin, pmax) fig, ax = plt.subplots(1,1) for n in N: ax.plot(p, a[n]*p + b[n]) ax.set_xlim(0, pmax) ax.set_ylim(0, max(a*pmax + b)) ax.set_xlabel('Unit Production') ax.set_ylabel('Unit Operating Cost') ax.grid()
_____no_output_____
MIT
_build/html/_sources/notebooks/03/03.06-Unit-Commitment.ipynb
leonlan/MO-book
Pyomo model 1: Conventional implementation for semi-continuous variables
def unit_commitment(): m = pyo.ConcreteModel() m.N = pyo.Set(initialize=N) m.T = pyo.Set(initialize=T) m.x = pyo.Var(m.N, m.T, bounds = (0, pmax)) m.u = pyo.Var(m.N, m.T, domain=pyo.Binary) # objective m.cost = pyo.Objective(expr = sum(m.x[n,t]*a[n] + m.u[n,t]*b[n] for t in m.T for n in m.N), sense=pyo.minimize) # demand m.demand = pyo.Constraint(m.T, rule=lambda m, t: sum(m.x[n,t] for n in N) == d[t]) # semi-continuous m.lb = pyo.Constraint(m.N, m.T, rule=lambda m, n, t: pmin*m.u[n,t] <= m.x[n,t]) m.ub = pyo.Constraint(m.N, m.T, rule=lambda m, n, t: pmax*m.u[n,t] >= m.x[n,t]) return m m = unit_commitment() pyo.SolverFactory('cbc').solve(m).write() fig, ax = plt.subplots(max(N)+1, 1, figsize=(8, 1.5*max(N)+1)) for n in N: ax[n].bar(T+1, [m.x[n,t]() for t in T]) ax[n].set_xlim(0, max(T)+2) ax[n].set_ylim(0, 1.1*pmax) ax[n].plot(ax[n].get_xlim(), np.array([pmax, pmax]), 'r--') ax[n].plot(ax[n].get_xlim(), np.array([pmin, pmin]), 'r--') ax[n].set_title('Unit ' + str(n+1)) fig.tight_layout()
# ========================================================== # = Solver Results = # ========================================================== # ---------------------------------------------------------- # Problem Information # ---------------------------------------------------------- Problem: - Name: unknown Lower bound: 1018.71533244 Upper bound: 1018.71533244 Number of objectives: 1 Number of constraints: 200 Number of variables: 180 Number of binary variables: 100 Number of integer variables: 100 Number of nonzeros: 180 Sense: minimize # ---------------------------------------------------------- # Solver Information # ---------------------------------------------------------- Solver: - Status: ok User time: -1.0 System time: 0.06 Wallclock time: 0.06 Termination condition: optimal Termination message: Model was solved to optimality (subject to tolerances), and an optimal solution is available. Statistics: Branch and bound: Number of bounded subproblems: 0 Number of created subproblems: 0 Black box: Number of iterations: 0 Error rc: 0 Time: 0.07869720458984375 # ---------------------------------------------------------- # Solution Information # ---------------------------------------------------------- Solution: - number of solutions: 0 number of solutions displayed: 0
MIT
_build/html/_sources/notebooks/03/03.06-Unit-Commitment.ipynb
leonlan/MO-book
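For inspection (my addition), the solved on/off decisions of model 1 can be collected into a table, one row per unit and one column per time period:
# commitment schedule recovered from the solved binary variables u[n, t]
schedule = pd.DataFrame({'unit ' + str(n + 1): [round(m.u[n, t]()) for t in T] for n in N}, index=T + 1)
print(schedule.T)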
Pyomo model 2: GDP implementation
def unit_commitment_gdp(): m = pyo.ConcreteModel() m.N = pyo.Set(initialize=N) m.T = pyo.Set(initialize=T) m.x = pyo.Var(m.N, m.T, bounds = (0, pmax)) # demand m.demand = pyo.Constraint(m.T, rule=lambda m, t: sum(m.x[n,t] for n in N) == d[t]) # representing the semi-continuous variables as disjunctions m.sc1 = gdp.Disjunct(m.N, m.T, rule=lambda d, n, t: d.model().x[n,t] == 0) m.sc2 = gdp.Disjunct(m.N, m.T, rule=lambda d, n, t: d.model().x[n,t] >= pmin) m.sc = gdp.Disjunction(m.N, m.T, rule=lambda m, n, t: [m.sc1[n,t], m.sc2[n,t]]) # objective. Note use of the disjunct indicator variable m.cost = pyo.Objective(expr = sum(m.x[n,t]*a[n] + m.sc2[n,t].indicator_var*b[n] for t in m.T for n in m.N), sense=pyo.minimize) # alternative formulation. But how to access the indicator variable? #m.semicontinuous = gdp.Disjunction(m.N, m.T, rule=lambda m, n, t: [m.x[n,t]==0, m.x[n,t] >= pmin]) pyo.TransformationFactory('gdp.chull').apply_to(m) return m m_gdp = unit_commitment_gdp() pyo.SolverFactory('cbc').solve(m_gdp).write()
# ========================================================== # = Solver Results = # ========================================================== # ---------------------------------------------------------- # Problem Information # ---------------------------------------------------------- Problem: - Name: unknown Lower bound: 863.60019688 Upper bound: 863.60019688 Number of objectives: 1 Number of constraints: 20 Number of variables: 100 Number of binary variables: 200 Number of integer variables: 200 Number of nonzeros: 100 Sense: minimize # ---------------------------------------------------------- # Solver Information # ---------------------------------------------------------- Solver: - Status: ok User time: -1.0 System time: 0.02 Wallclock time: 0.03 Termination condition: optimal Termination message: Model was solved to optimality (subject to tolerances), and an optimal solution is available. Statistics: Branch and bound: Number of bounded subproblems: 0 Number of created subproblems: 0 Black box: Number of iterations: 0 Error rc: 0 Time: 0.04323315620422363 # ---------------------------------------------------------- # Solution Information # ---------------------------------------------------------- Solution: - number of solutions: 0 number of solutions displayed: 0
MIT
_build/html/_sources/notebooks/03/03.06-Unit-Commitment.ipynb
leonlan/MO-book
There is a problem here! Why are the results different? Somehow it appears the values of the indicator variables are being ignored.
for n in N: for t in T: print("n = {0:2d} t = {1:2d} {2} {3} {4:5.2f}".format(n, t, m_gdp.sc1[n,t].indicator_var(), m_gdp.sc2[n,t].indicator_var(), m.x[n,t]())) # note: the last value printed is m.x from model 1, not m_gdp.x, so the x column does not come from the GDP solution
n = 0 t = 0 1.0 0.0 76.13 n = 0 t = 1 1.0 0.0 45.86 n = 0 t = 2 1.0 0.0 45.86 n = 0 t = 3 1.0 0.0 75.96 n = 0 t = 4 1.0 0.0 45.86 n = 0 t = 5 1.0 0.0 45.86 n = 0 t = 6 1.0 0.0 45.86 n = 0 t = 7 1.0 0.0 73.80 n = 0 t = 8 1.0 0.0 68.79 n = 0 t = 9 1.0 0.0 61.04 n = 0 t = 10 1.0 0.0 47.89 n = 0 t = 11 1.0 0.0 56.14 n = 0 t = 12 1.0 0.0 45.86 n = 0 t = 13 1.0 0.0 45.86 n = 0 t = 14 1.0 0.0 45.86 n = 0 t = 15 1.0 0.0 47.08 n = 0 t = 16 1.0 0.0 45.86 n = 0 t = 17 1.0 0.0 53.14 n = 0 t = 18 1.0 0.0 45.86 n = 0 t = 19 1.0 0.0 47.56 n = 1 t = 0 1.0 0.0 0.00 n = 1 t = 1 1.0 0.0 0.00 n = 1 t = 2 1.0 0.0 0.00 n = 1 t = 3 1.0 0.0 0.00 n = 1 t = 4 1.0 0.0 0.00 n = 1 t = 5 1.0 0.0 0.00 n = 1 t = 6 1.0 0.0 0.00 n = 1 t = 7 1.0 0.0 0.00 n = 1 t = 8 1.0 0.0 0.00 n = 1 t = 9 1.0 0.0 0.00 n = 1 t = 10 1.0 0.0 0.00 n = 1 t = 11 1.0 0.0 0.00 n = 1 t = 12 1.0 0.0 0.00 n = 1 t = 13 1.0 0.0 0.00 n = 1 t = 14 1.0 0.0 0.00 n = 1 t = 15 1.0 0.0 0.00 n = 1 t = 16 1.0 0.0 0.00 n = 1 t = 17 1.0 0.0 0.00 n = 1 t = 18 1.0 0.0 0.00 n = 1 t = 19 1.0 0.0 0.00 n = 2 t = 0 1.0 0.0 0.00 n = 2 t = 1 1.0 0.0 0.00 n = 2 t = 2 1.0 0.0 0.00 n = 2 t = 3 1.0 0.0 0.00 n = 2 t = 4 1.0 0.0 0.00 n = 2 t = 5 1.0 0.0 0.00 n = 2 t = 6 1.0 0.0 0.00 n = 2 t = 7 1.0 0.0 0.00 n = 2 t = 8 1.0 0.0 0.00 n = 2 t = 9 1.0 0.0 0.00 n = 2 t = 10 1.0 0.0 0.00 n = 2 t = 11 1.0 0.0 0.00 n = 2 t = 12 1.0 0.0 0.00 n = 2 t = 13 1.0 0.0 0.00 n = 2 t = 14 1.0 0.0 0.00 n = 2 t = 15 1.0 0.0 0.00 n = 2 t = 16 1.0 0.0 0.00 n = 2 t = 17 1.0 0.0 0.00 n = 2 t = 18 1.0 0.0 0.00 n = 2 t = 19 1.0 0.0 0.00 n = 3 t = 0 1.0 0.0 76.43 n = 3 t = 1 1.0 0.0 61.93 n = 3 t = 2 1.0 0.0 64.26 n = 3 t = 3 1.0 0.0 76.43 n = 3 t = 4 1.0 0.0 57.86 n = 3 t = 5 1.0 0.0 67.42 n = 3 t = 6 1.0 0.0 65.12 n = 3 t = 7 1.0 0.0 76.43 n = 3 t = 8 1.0 0.0 76.43 n = 3 t = 9 1.0 0.0 76.43 n = 3 t = 10 1.0 0.0 76.43 n = 3 t = 11 1.0 0.0 76.43 n = 3 t = 12 1.0 0.0 72.97 n = 3 t = 13 1.0 0.0 72.25 n = 3 t = 14 1.0 0.0 72.77 n = 3 t = 15 1.0 0.0 76.43 n = 3 t = 16 1.0 0.0 70.91 n = 3 t = 17 1.0 0.0 76.43 n = 3 t = 18 1.0 0.0 72.74 n = 3 t = 19 1.0 0.0 76.43 n = 4 t = 0 1.0 0.0 0.00 n = 4 t = 1 1.0 0.0 45.86 n = 4 t = 2 1.0 0.0 45.86 n = 4 t = 3 1.0 0.0 0.00 n = 4 t = 4 1.0 0.0 0.00 n = 4 t = 5 1.0 0.0 45.86 n = 4 t = 6 1.0 0.0 45.86 n = 4 t = 7 1.0 0.0 0.00 n = 4 t = 8 1.0 0.0 45.86 n = 4 t = 9 1.0 0.0 0.00 n = 4 t = 10 1.0 0.0 0.00 n = 4 t = 11 1.0 0.0 0.00 n = 4 t = 12 1.0 0.0 45.86 n = 4 t = 13 1.0 0.0 0.00 n = 4 t = 14 1.0 0.0 0.00 n = 4 t = 15 1.0 0.0 45.86 n = 4 t = 16 1.0 0.0 0.00 n = 4 t = 17 1.0 0.0 0.00 n = 4 t = 18 1.0 0.0 45.86 n = 4 t = 19 1.0 0.0 45.86
MIT
_build/html/_sources/notebooks/03/03.06-Unit-Commitment.ipynb
leonlan/MO-book
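Two observations that may help with the question above (my own debugging notes, not verified against this Pyomo version). First, as flagged in the comment above, the diagnostic loop prints x from model 1, so the x column does not belong to the GDP solution. Second, the solver statistics report only 20 constraints for the transformed GDP model — essentially just the demand constraints — which suggests the disjuncts ended up empty: a Disjunct rule that merely returns a relational expression may not attach it as a constraint. A sketch of an explicit variant worth trying:
# hypothetical replacement for the lambda-based disjunct rules: attach the constraints explicitly
def off_rule(d, n, t):
    d.off = pyo.Constraint(expr=d.model().x[n, t] == 0)       # unit n is off in period t

def on_rule(d, n, t):
    d.on = pyo.Constraint(expr=d.model().x[n, t] >= pmin)     # unit n runs at or above pmin

# inside unit_commitment_gdp() these would replace the one-line rules:
#     m.sc1 = gdp.Disjunct(m.N, m.T, rule=off_rule)
#     m.sc2 = gdp.Disjunct(m.N, m.T, rule=on_rule)
# m_gdp.sc2[0, 0].pprint() shows what was actually built inside a disjunct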
Training Neural NetworksThe network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time.At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function.To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems$$\large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2}$$where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels.By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. BackpropagationFor single layer networks, gradient descent is straightforward to implement. However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks.Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation.In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss.To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. 
Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule.$$\large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2}$$**Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on.We update our weights using this gradient with some learning rate $\alpha$. $$\large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1}$$The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. Losses in PyTorchLet's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels.Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.htmltorch.nn.CrossEntropyLoss),> This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class.>> The input is expected to contain scores for each class.This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities.
import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10)) # Define the loss criterion = nn.CrossEntropyLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss)
tensor(2.3011, grad_fn=<NllLossBackward>)
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.LogSoftmax)). Then you can get the actual probabilites by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.htmltorch.nn.NLLLoss)).>**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss.
## Solution # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) # Define the loss criterion = nn.NLLLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our log-probabilities logps = model(images) # Calculate the loss with the logps and the labels loss = criterion(logps, labels) print(loss)
tensor(2.2987, grad_fn=<NllLossBackward>)
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
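A quick sanity check (my addition): exponentiating the log-softmax output recovers proper probabilities, with each row summing to 1.
ps = torch.exp(logps)                 # back from log-probabilities to probabilities
print(ps.shape, ps.sum(dim=1)[:5])    # each of the first five rows should sum to 1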
AutogradNow that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`.You can turn off gradients for a block of code with the `torch.no_grad()` content:```pythonx = torch.zeros(1, requires_grad=True)>>> with torch.no_grad():... y = x * 2>>> y.requires_gradFalse```Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`.The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`.
x = torch.randn(2,2, requires_grad=True) print(x) y = x**2 print(y)
tensor([[0.0357, 0.2308], [1.3125, 2.6173]], grad_fn=<PowBackward0>)
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
Below we can see the operation that created `y`, a power operation `PowBackward0`.
## grad_fn shows the function that generated this variable print(y.grad_fn)
<PowBackward0 object at 0x107e2e278>
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
The autograd module keeps track of these operations and knows how to calculate the gradient for each one. In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean.
z = y.mean() print(z)
tensor(1.0491, grad_fn=<MeanBackward0>)
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
You can check the gradients for `x` and `y` but they are empty currently.
print(x.grad)
None
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x`$$\frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2}$$
z.backward() print(x.grad) print(x/2)
tensor([[-0.0945, -0.2402], [ 0.5728, 0.8089]]) tensor([[-0.0945, -0.2402], [ 0.5728, 0.8089]], grad_fn=<DivBackward0>)
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
These gradients calculations are particularly useful for neural networks. For training we need the gradients of the weights with respect to the cost. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. Loss and Autograd togetherWhen we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass.
# Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() images, labels = next(iter(trainloader)) images = images.view(images.shape[0], -1) logps = model(images) loss = criterion(logps, labels) print('Before backward pass: \n', model[0].weight.grad) loss.backward() print('After backward pass: \n', model[0].weight.grad)
Before backward pass: None After backward pass: tensor([[ 2.9076e-04, 2.9076e-04, 2.9076e-04, ..., 2.9076e-04, 2.9076e-04, 2.9076e-04], [ 1.8523e-03, 1.8523e-03, 1.8523e-03, ..., 1.8523e-03, 1.8523e-03, 1.8523e-03], [-1.0316e-03, -1.0316e-03, -1.0316e-03, ..., -1.0316e-03, -1.0316e-03, -1.0316e-03], ..., [-3.6785e-05, -3.6785e-05, -3.6785e-05, ..., -3.6785e-05, -3.6785e-05, -3.6785e-05], [-1.3995e-03, -1.3995e-03, -1.3995e-03, ..., -1.3995e-03, -1.3995e-03, -1.3995e-03], [ 0.0000e+00, 0.0000e+00, 0.0000e+00, ..., 0.0000e+00, 0.0000e+00, 0.0000e+00]])
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
Training the network!There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below.
from torch import optim # Optimizers require the parameters to optimize and a learning rate optimizer = optim.SGD(model.parameters(), lr=0.01)
_____no_output_____
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch:* Make a forward pass through the network * Use the network output to calculate the loss* Perform a backward pass through the network with `loss.backward()` to calculate the gradients* Take a step with the optimizer to update the weightsBelow I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches.
print('Initial weights - ', model[0].weight) images, labels = next(iter(trainloader)) images.resize_(64, 784) # Clear the gradients, do this because gradients are accumulated optimizer.zero_grad() # Forward pass, then backward pass, then update weights output = model(images) loss = criterion(output, labels) loss.backward() print('Gradient -', model[0].weight.grad) # Take an update step and view the new weights optimizer.step() print('Updated weights - ', model[0].weight)
Updated weights - Parameter containing: tensor([[ 0.0134, 0.0305, 0.0163, ..., -0.0268, 0.0101, -0.0027], [-0.0334, -0.0089, -0.0294, ..., 0.0047, -0.0106, -0.0214], [-0.0068, -0.0275, -0.0132, ..., -0.0203, 0.0075, 0.0117], ..., [-0.0147, 0.0041, 0.0312, ..., 0.0302, 0.0105, 0.0253], [ 0.0122, 0.0233, 0.0090, ..., 0.0184, 0.0041, -0.0196], [ 0.0138, 0.0348, 0.0040, ..., -0.0239, -0.0291, 0.0166]], requires_grad=True)
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
Training for real. Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature: one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. For each batch, we'll do a training pass where we calculate the loss, do a backwards pass, and update the weights.> **Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.
model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=0.003) epochs = 5 for e in range(epochs): running_loss = 0 for images, labels in trainloader: # Flatten MNIST images into a 784 long vector images = images.view(images.shape[0], -1) # TODO: Training pass optimizer.zero_grad() output = model(images) loss = criterion(output, labels) loss.backward() optimizer.step() running_loss += loss.item() else: print(f"Training loss: {running_loss/len(trainloader)}")
_____no_output_____
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
With the network trained, we can check out its predictions.
%matplotlib inline import helper images, labels = next(iter(trainloader)) img = images[0].view(1, 784) # Turn off gradients to speed up this part with torch.no_grad(): logps = model(img) # Output of the network are log-probabilities, need to take exponential for probabilities ps = torch.exp(logps) helper.view_classify(img.view(1, 28, 28), ps)
_____no_output_____
MIT
intro-to-pytorch/Part 3 - Training Neural Networks (Solution).ipynb
yangjue-han/deep-learning-v2-pytorch
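To read off the prediction numerically rather than from the plot (my addition), the most likely class can be pulled out of `ps` with `topk`:
top_p, top_class = ps.topk(1, dim=1)            # highest probability and its class index
print('predicted digit:', top_class.item(), 'with probability', round(top_p.item(), 3))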
Cleaning / Sampling
def cleanDF (df): r1 = re.compile('.*reporting') r2 = re.compile('.*imputed') cols_to_drop1 = list(filter((r1.match), df.columns)) cols_to_drop2 = list(filter((r2.match), df.columns)) cols_to_drop3 = ['admit_NICU'] cols_to_drop = cols_to_drop1 + cols_to_drop2 + cols_to_drop3 cols_to_keep = [col for col in df.columns if col not in cols_to_drop] X_and_target = df[cols_to_keep + ['admit_NICU']].copy() numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] catDF = X_and_target.select_dtypes(include=object).copy() numDF = X_and_target.select_dtypes(include=numerics).copy() #only numeric columns le = LabelEncoder() catDF = catDF.apply(le.fit_transform) concat_df = pd.concat([numDF,catDF],axis=1) return concat_df
_____no_output_____
FTL
notebooks/richardkim/RK_modeling.ipynb
ConnorHaas03/CDC_capstone
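One caveat about `cleanDF` (my note): `LabelEncoder` assigns arbitrary integer codes to nominal categories, which a linear model then treats as ordered. A hedged alternative, assuming the same column filtering has already been applied, is to one-hot encode the object columns instead:
def cleanDF_onehot(df, target='admit_NICU'):
    # one-hot encode every object column except the target, leaving numeric columns untouched
    cat_cols = df.select_dtypes(include=object).columns.drop(target, errors='ignore')
    return pd.get_dummies(df, columns=list(cat_cols))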
Logistic Model Part 1
sample_size_list = [100] import warnings warnings.filterwarnings('ignore') #GLM with Cross Validation for sample_per_year in sample_size_list: dwnSmplDF = concat_df.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) cl_df = dwnSmplDF[cols_to_keep] encoded_target = dwnSmplDF['admit_NICU'] glm_CV = linear_model.LogisticRegressionCV(#Cs = int(1e4), cv = 5, solver = 'saga', n_jobs = -1, random_state = 108 ).fit(cl_df, encoded_target) print('sample size : %d\n' % (sample_per_year*5)) %time glm_CV.fit(cl_df, encoded_target) print('\nscore : {0}'.format(glm_CV.score(cl_df, encoded_target))) print('-'*50) ''' sample size : 500 CPU times: user 6min 47s, sys: 1min 26s, total: 8min 14s Wall time: 1min 54s score : 0.932 -------------------------------------------------- ''' #GLM with Lasso Penalty for sample_per_year in sample_size_list: dwnSmplDF = concat_df.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) cl_df = dwnSmplDF[cols_to_keep] encoded_target = dwnSmplDF['admit_NICU'] glm_lasso = linear_model.LogisticRegression(penalty = 'l1', solver = 'saga', multi_class='auto', n_jobs = -1, C = 1e4) print('sample size : %d\n' % (sample_per_year*5)) %time glm_lasso.fit(cl_df, encoded_target) print('\nscore : {0}'.format(glm_lasso.score(cl_df, encoded_target))) print('-'*50) #GLM with Lasso Penalty and Cross Validation sample_size_list = [100,1000,10000] for sample_per_year in sample_size_list: dwnSmplDF = concat_df.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) cl_df = dwnSmplDF[cols_to_keep] encoded_target = dwnSmplDF['admit_NICU'] glm_lassoCV = linear_model.LogisticRegressionCV(#Cs = int(1e4), cv = 5, penalty = 'l1', solver = 'saga', n_jobs = -1, random_state = 108 ).fit(cl_df, encoded_target) print('sample size : %d\n' % (sample_per_year*5)) %time glm_lassoCV.fit(cl_df, encoded_target) print('\nscore : {0}'.format(glm_lassoCV.score(cl_df, encoded_target))) print('-'*50) #GLM with Lasso Penalty and Cross Validation sample_size_list = [20000] for sample_per_year in sample_size_list: dwnSmplDF = concat_df.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) cl_df = dwnSmplDF[cols_to_keep] encoded_target = dwnSmplDF['admit_NICU'] glm_lassoCV = linear_model.LogisticRegressionCV(#Cs = int(1e4), cv = 5, penalty = 'l1', solver = 'saga', n_jobs = -1, random_state = 108 ).fit(cl_df, encoded_target) print('sample size : %d\n' % (sample_per_year*5)) %time glm_lassoCV.fit(cl_df, encoded_target) print('\nscore : {0}'.format(glm_lassoCV.score(cl_df, encoded_target))) print('-'*50) glm_lassoCV pickle.dump(glm_lassoCV, open('best_glmlassoCV.sav', 'wb')) # glm_lassoCV = pickle.load(open('best_glmlassoCV.sav', 'rb')) glm_lassoCV.get_params from sklearn.metrics import confusion_matrix print(confusion_matrix(encoded_target,y_pred)) cf = confusion_matrix(encoded_target,y_pred) np.set_printoptions(suppress=True) print(cf/1000.) 
glmLCV_coefs = pd.DataFrame({'col' :list(cl_df.columns), 'coef0': glm_lassoCV.coef_[0], 'coef1': glm_lassoCV.coef_[1], 'coef2': glm_lassoCV.coef_[2]}) glmLCV_coefs glmLCV_coefs['abs_coef0'] = glmLCV_coefs['coef0'].apply(abs) glmLCV_coefs['abs_coef1'] = glmLCV_coefs['coef1'].apply(abs) glmLCV_coefs['abs_coef2'] = glmLCV_coefs['coef2'].apply(abs) top20_coef0 = glmLCV_coefs.nlargest(10,'abs_coef0')['col'] top20_coef1 = glmLCV_coefs.nlargest(10,'abs_coef1')['col'] top20_coef2 = glmLCV_coefs.nlargest(10,'abs_coef2')['col'] top20_coef0 top20_coef1 top20_coef2 list(set(top20_coef0).union(set(top20_coef1)).union(set(top20_coef2))) list(set(top20_coef0).intersection(set(top20_coef1)).intersection(set(top20_coef2))) ''' sample size : 100000 CPU times: user 54min 1s, sys: 37.3 s, total: 54min 39s Wall time: 8min 39s score : 0.92512 -------------------------------------------------- ''' #GLM with Lasso Penalty and Cross Validation # sample_size_list = [200000] # for sample_per_year in sample_size_list: # dwnSmplDF = concat_df.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) # cl_df = dwnSmplDF[cols_to_keep] # encoded_target = dwnSmplDF['admit_NICU'] # glm_lassoCV = linear_model.LogisticRegressionCV(#Cs = int(1e4), # cv = 5, # penalty = 'l1', # solver = 'saga', # n_jobs = -2, # random_state = 108 # ).fit(cl_df, encoded_target) # print('sample size : %d\n' % (sample_per_year*5)) # %time glm_lassoCV.fit(cl_df, encoded_target) # print('\nscore : {0}'.format(glm_lassoCV.score(cl_df, encoded_target))) # print('-'*50)
_____no_output_____
FTL
notebooks/richardkim/RK_modeling.ipynb
ConnorHaas03/CDC_capstone
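Note that the confusion-matrix step above uses `y_pred` without defining it in the code shown; a minimal definition consistent with the surrounding cells (my assumption about the intent) would be:
y_pred = glm_lassoCV.predict(cl_df)   # in-sample predictions from the fitted CV model
print(confusion_matrix(encoded_target, y_pred))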
Logistic Model Part 2. Sampled in a way that: 1. unknowns in the `admit_NICU` column were thrown away; 2. there are equal numbers of `Y`'s and `N`'s in the `admit_NICU` column (balanced sampling).
cl_df = cleanDF(totDF) nicu_allY = cl_df.loc[cl_df['admit_NICU']==1] nicu_allN = cl_df.loc[cl_df['admit_NICU']==0] #pure GLM with balanced sample (w/o stratified year) sample_size_list = [100] for sample_per_class in sample_size_list: sampN = nicu_allN.sample(sample_per_class) sampY = nicu_allY.sample(sample_per_class) samp = pd.concat([sampN,sampY],axis=0) samp_target = samp.admit_NICU samp_X = samp.drop('admit_NICU',axis=1) # bal_dwnSmplY = nicu_allY.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) # bal_dwnSmplN = nicu_allN.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_year)) # bal_dwnSmpl = pd.concat([bal_dwnSmlpY,bal_dwnamlpN],axis=0) glm = linear_model.LogisticRegression(solver = 'saga', multi_class='auto', n_jobs = -1, C = 1e4) print('sample size : %d\n' % (sample_per_class*2)) %time glm.fit(samp_X, samp_target) print('\nscore : {0}'.format(glm.score(samp_X, samp_target))) print('-'*50) #pure GLM with balanced sample (w/ stratified year) sample_size_class = [100] for sample_per_class in sample_size_class: # sampN = nicu_allN.sample(sample_per_class) # sampY = nicu_allY.sample(sample_per_class) # samp = pd.concat([sampN,sampY],axis=0) # samp_target = samp.admit_NICU # samp_X = samp.drop('admit_NICU',axis=1) bal_dwnSmplY = nicu_allY.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_class)) bal_dwnSmplN = nicu_allN.groupby('birth_year',group_keys = False).apply(lambda x: x.sample(sample_per_class)) bal_dwnSmpl = pd.concat([bal_dwnSmplY,bal_dwnSmplN],axis=0) bal_target = bal_dwnSmpl.admit_NICU bal_X = bal_dwnSmpl.drop('admit_NICU',axis=1) glm = linear_model.LogisticRegression(solver = 'saga', multi_class='auto', n_jobs = -1, C = 1e4) print('sample size : %d\n' % (sample_per_class*2)) %time glm.fit(bal_X, bal_target) print('\nscore : {0}'.format(glm.score(bal_X, bal_target))) print('-'*50)
sample size : 200 CPU times: user 79.9 ms, sys: 1.13 ms, total: 81 ms Wall time: 108 ms score : 0.774 --------------------------------------------------
FTL
notebooks/richardkim/RK_modeling.ipynb
ConnorHaas03/CDC_capstone
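Two follow-ups worth considering here (my own notes). First, since `cleanDF` label-encodes `admit_NICU`, it is worth confirming which integer corresponds to 'Y' before filtering on `==1`; with alphabetical classes ['N', 'U', 'Y'] the codes would be N=0, U=1, Y=2. Second, `class_weight='balanced'` is an alternative to manual down-sampling. A sketch, assuming `totDF`, `LabelEncoder`, `samp_X` and `samp_target` are still in scope from the cells above:
# 1) check the actual LabelEncoder mapping for the target
le_check = LabelEncoder().fit(totDF['admit_NICU'].astype(str))
print(dict(zip(le_check.classes_, le_check.transform(le_check.classes_))))

# 2) let the model reweight classes instead of down-sampling by hand
glm_balanced = linear_model.LogisticRegression(solver='saga', multi_class='auto', n_jobs=-1,
                                               C=1e4, class_weight='balanced')
print(glm_balanced.fit(samp_X, samp_target).score(samp_X, samp_target))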
----- Session 06: OOP By: **Mohamed Fouad Fakhruldeen**, [email protected] Class & Object
class ClassName: ...  # general form: a class bundles attributes (data) and methods (behaviour)
_____no_output_____
MIT
Session 06 - OOP.ipynb
FOU4D/ITI-Python
attributes
class My_Class: my_attr = "old Attribute Value Here" x = My_Class() print(x) print(x.my_attr) x.my_attr = "New Attribute Value" print(x.my_attr)
<__main__.My_Class object at 0x7f47c80e8130> old Attribute Value Here New Attribute Value
MIT
Session 06 - OOP.ipynb
FOU4D/ITI-Python
methods
class My_Class: my_attr = "New Attribute Value Here" # class attribute def my_method(self): print("Print my method") x = My_Class() print(x) print(x.my_attr) x.my_method() class My_Cars: def __init__(self, brand, model, year, price): ## instance attributes self.brand = brand self.model = model self.year = year self.price = price def description(self): return f"{self.brand} {self.model} made in {self.year} with initial value {self.price}" def pricenow(self, condition): return f"{self.description()} has new value {condition*self.price}" def __str__(self): return "this only appears while printing" first_car = My_Cars("Toyota", "Corolla", 2016, 5000) print(first_car.brand) print(first_car.description()) print(first_car.pricenow(70/100)) print(first_car) x = str(first_car) print(x)
Toyota Toyota Corolla made in 2016 with initial value 5000 Toyota Corolla made in 2016 with initial value 5000 has new value 3500.0 this only appears while printing this only appears while printing
MIT
Session 06 - OOP.ipynb
FOU4D/ITI-Python
Inheritance. Child classes can override or extend the attributes and methods of parent classes. They can also specify attributes and methods that are unique to themselves.
class MainClass: attr1 = "this is parent attribute" class ChildClass(MainClass): pass x = ChildClass() print(x.attr1) class MainClass2: attr12 = "this is parent attribute" class ChildClass2(MainClass2): attr12 = "This one from child" x2 = ChildClass2() print(x2.attr12)
This one from child
MIT
Session 06 - OOP.ipynb
FOU4D/ITI-Python
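Since the markdown above mentions extending as well as overriding, here is a short extra example of mine using `super()` to reuse the parent implementation inside the child:
class Parent:
    def greet(self):
        return "hello from Parent"

class Child(Parent):
    def greet(self):
        # extend rather than replace: call the parent method, then add to its result
        return super().greet() + " ... and from Child"

print(Child().greet())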
multiple inheritance
class Base1: pass class Base2: pass class MultiDerived(Base1, Base2): pass class Base: pass class Derived1(Base): pass class Derived2(Derived1): pass
_____no_output_____
MIT
Session 06 - OOP.ipynb
FOU4D/ITI-Python
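Because the classes above have empty bodies, a small follow-up example (my own) shows how Python resolves a method defined in more than one base class — the Method Resolution Order (MRO) picks the leftmost base first:
class A:
    def who(self):
        return "A"

class B:
    def who(self):
        return "B"

class C(A, B):      # A is listed first, so A.who wins
    pass

print(C().who())                              # -> A
print([cls.__name__ for cls in C.__mro__])    # -> ['C', 'A', 'B', 'object']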