Columns:
  code: string (lengths 13 to 6.09M)
  order_type: string (2 classes)
  original_example: dict
  step_ids: list (lengths 1 to 5)
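The rows below follow this schema. As a rough, illustrative sketch only (the file name, the JSON-lines loading mechanism, and the field handling are assumptions, not part of this dump), a row could be inspected like this:

import json

# Illustrative only: assumes each row is a dict with the four fields listed
# above, stored one JSON object per line; "samples.jsonl" is a placeholder.
def describe_row(row):
    # "code" holds the full training text; "flexible" rows may carry special
    # step/mask tokens inline.
    print("order_type :", row["order_type"])
    print("code length:", len(row["code"]))

    # "original_example" is the raw record: a blob_id, an index, and the
    # progressive "step-N" snapshots of the source file.
    example = row["original_example"]
    if isinstance(example, str):  # tolerate a JSON-encoded dict
        example = json.loads(example)
    steps = sorted(k for k in example if k.startswith("step-") and k != "step-ids")
    print("blob_id    :", example.get("blob_id"))
    print("steps      :", steps)

    # "step_ids" is a list of integers, 1 to 5 entries per row (per the schema).
    print("step_ids   :", row["step_ids"])

if __name__ == "__main__":
    with open("samples.jsonl") as fh:  # placeholder path
        for line in fh:
            describe_row(json.loads(line))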
from __future__ import print_function
from __future__ import absolute_import
#
# LinkedIn Sales Module
#
import requests
from bs4 import BeautifulSoup
import logging
from plugins.base import PageGrabber
from plugins.colors import BodyColors as bc
import json
try:
    import __builtin__ as bi
except:
    import builtins as bi


class LinkedInGrabber(PageGrabber): # LinkedIN.com sales scraper for email lookups
    def get_info(self, email): # Requires AUTH, login and request AUTHENTICATED pages from linkedin
        client = requests.Session() # Establish the session()
        print("["+bc.CPRP+"?"+bc.CEND+"] "+bc.CCYN + "LinkedIn" + bc.CEND)
        HOMEPAGE_URL = 'https://www.linkedin.com' # Set homepage for linkedin
        LOGIN_URL = 'https://www.linkedin.com/uas/login-submit' # Set login page for linkedin
        LOGOUT_URL = 'https://www.linkedin.com/m/logout'
        source = client.get(HOMEPAGE_URL).content # Request source
        soup = self.get_dom(source) # BS DOM
        csrf = soup.find(id="loginCsrfParam-login")['value']
        #
        # ATTENTION:: YOU MUST POPULATE THE FOLLOWING WITH YOUR REAL CREDENTIALS
        #
        # ATTENTION:: THIS WILL NOT WORK PROPRLY OTHERWISE
        #
        # session_key = email    session_password = your password
        #
        try:
            with open('./storage/fb_login', 'r') as fbinfo:
                login_information = json.loads(fbinfo.read())
                #print(json.loads(login_information))
                login_information['loginCsrfParam'] = csrf
        except:
            login_information = {
                'session_key': '',
                'session_password': '',
                'loginCsrfParam': '',
            }
            pass
        if not login_information['session_key']:
            if login_information['session_password'] == '': # If no modifications of default u/p, print error, return
                print (" ["+bc.CRED+"ATTENTION"+bc.CEND+"] " + \
                       bc.CYLW+"\tThis module requires authentication to use it properly.\n\tIt will store Credential pairs in plain-text."+bc.CEND)
                print (" ["+bc.CRED+"ATTENTION"+bc.CEND+"] " + \
                       bc.CYLW + "This could produce a trail and identify the used account."+bc.CEND)
                print()
                savecreds = raw_input("[{}?{}] {}Would you like to save credentials now? {}(Y/n){}]: ".format(bc.CRED, bc.CEND, bc.CRED, bc.CYLW, bc.CEND))
                print()
                luser = raw_input(" ["+bc.CRED+"?"+bc.CEND+"] " + \
                                  bc.CYLW+"What is your throw-away linkedin username: "+bc.CEND)
                lpass = raw_input(" ["+bc.CRED+"?"+bc.CEND+"] " + \
                                  bc.CYLW+"What is your throw-away linkedin password: "+bc.CEND)
                login_information = {
                    'session_key': luser,
                    'session_password': lpass,
                    'loginCsrfParam': csrf,
                }
                if str(savecreds).lower() in ['y', 'yes']:
                    try:
                        with open('./storage/fb_login', 'w') as fbinfo:
                            fbinfo.write(json.dumps(login_information))
                    except Exception as failedtowrite:
                        print(("Failed to write fbinfo to file: %s") % failedtowrite)
        try:
            client.post(LOGIN_URL, data=login_information)
            results = client.get('https://linkedin.com/sales/gmail/profile/viewByEmail/'+str(email)).text
        except Exception as failedlinkedinauth:
            print((" ["+bc.CRED+"X"+bc.CEND+"] " + \
                   bc.CYLW+"This module did not properly authenticate: %s" + \
                   bc.CEND) % failedlinkedinauth)
        soup = self.get_dom(results)
        self.get_source(LOGOUT_URL) # Log out of LinkedIn, kills sessionID
        try: # Search and set from results
            profile = soup.find('a', attrs={'class': 'li-hover-under li-txt-black-85'})['href']
            print(" ["+bc.CGRN+"+"+bc.CEND+"] "+ \
                  bc.CRED+"Profile: "+bc.CEND + \
                  str(profile)
                  )
        except:
            print(" ["+bc.CRED+"X"+bc.CEND+"] " + \
                  bc.CYLW+"No LinkedIn account found.\n" + \
                  bc.CEND
                  )
            return
        try:
            fname = soup.find('span', attrs={'id': 'li-profile-name'})['data-fname']
            lname = soup.find('span', attrs={'id': 'li-profile-name'})['data-lname']
            name = str(fname) + " " + str(lname)
            print(" ["+bc.CGRN+"+"+bc.CEND+"] " + \
                  bc.CRED+"Name: " + \
                  bc.CEND + str(fname) + \
                  " " + \
                  str(lname)
                  )
        except:
            name = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No username can be found.\n"+bc.CEND)
        try:
            company = soup.find('span', {'class': 'li-user-title-company'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] " + \
                  bc.CRED+"Company: " + \
                  bc.CEND + str(company)
                  )
        except:
            company = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Company can be found.\n"+bc.CEND)
        try:
            title = soup.find('div', {'class': 'li-user-title'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] " + \
                  bc.CRED+"Title: " + \
                  bc.CEND + \
                  str(title)
                  )
        except:
            title = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Job Title can be found.\n"+bc.CEND)
        try:
            location = soup.find('div', {'class': 'li-user-location'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] "+bc.CRED+"Location: "+bc.CEND + str(location))
        except:
            location = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Location can be found.\n"+bc.CEND)
        try:
            email = soup.find('span', {'id': 'email'}).get_text()
            print(" ["+bc.CGRN+"+"+bc.CEND+"] "+bc.CRED+"Email: "+bc.CEND + str(email))
        except:
            email = ""
            pass # print (" ["+bc.CRED+"X"+bc.CEND+"] "+bc.CYLW+"No Email account found.\n"+bc.CEND)
        self.info_dict.update({
            "profile": profile,
            "name": name,
            "location": location,
            "company": company,
            "title": title,
            "email": email
        })
        bi.outdata['linkedin'] = self.info_dict
        print()
        return
normal
{ "blob_id": "570e0d46aa1ea88d1784447e8f693199e3c3b6ad", "index": 9488, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass LinkedInGrabber(PageGrabber):\n\n def get_info(self, email):\n client = requests.Session()\n print('[' + bc.CPRP + '?' + bc.CEND + '] ' + bc.CCYN + 'LinkedIn' +\n bc.CEND)\n HOMEPAGE_URL = 'https://www.linkedin.com'\n LOGIN_URL = 'https://www.linkedin.com/uas/login-submit'\n LOGOUT_URL = 'https://www.linkedin.com/m/logout'\n source = client.get(HOMEPAGE_URL).content\n soup = self.get_dom(source)\n csrf = soup.find(id='loginCsrfParam-login')['value']\n try:\n with open('./storage/fb_login', 'r') as fbinfo:\n login_information = json.loads(fbinfo.read())\n login_information['loginCsrfParam'] = csrf\n except:\n login_information = {'session_key': '', 'session_password': '',\n 'loginCsrfParam': ''}\n pass\n if not login_information['session_key']:\n if login_information['session_password'] == '':\n print(' [' + bc.CRED + 'ATTENTION' + bc.CEND + '] ' + bc.\n CYLW +\n \"\"\"\tThis module requires authentication to use it properly.\n\tIt will store Credential pairs in plain-text.\"\"\"\n + bc.CEND)\n print(' [' + bc.CRED + 'ATTENTION' + bc.CEND + '] ' + bc.\n CYLW +\n 'This could produce a trail and identify the used account.'\n + bc.CEND)\n print()\n savecreds = raw_input(\n '[{}?{}] {}Would you like to save credentials now? {}(Y/n){}]: '\n .format(bc.CRED, bc.CEND, bc.CRED, bc.CYLW, bc.CEND))\n print()\n luser = raw_input(' [' + bc.CRED + '?' + bc.CEND + '] ' +\n bc.CYLW + 'What is your throw-away linkedin username: ' +\n bc.CEND)\n lpass = raw_input(' [' + bc.CRED + '?' + bc.CEND + '] ' +\n bc.CYLW + 'What is your throw-away linkedin password: ' +\n bc.CEND)\n login_information = {'session_key': luser,\n 'session_password': lpass, 'loginCsrfParam': csrf}\n if str(savecreds).lower() in ['y', 'yes']:\n try:\n with open('./storage/fb_login', 'w') as fbinfo:\n fbinfo.write(json.dumps(login_information))\n except Exception as failedtowrite:\n print('Failed to write fbinfo to file: %s' %\n failedtowrite)\n try:\n client.post(LOGIN_URL, data=login_information)\n results = client.get(\n 'https://linkedin.com/sales/gmail/profile/viewByEmail/' +\n str(email)).text\n except Exception as failedlinkedinauth:\n print((' [' + bc.CRED + 'X' + bc.CEND + '] ' + bc.CYLW +\n 'This module did not properly authenticate: %s' + bc.CEND) %\n failedlinkedinauth)\n soup = self.get_dom(results)\n self.get_source(LOGOUT_URL)\n try:\n profile = soup.find('a', attrs={'class':\n 'li-hover-under li-txt-black-85'})['href']\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Profile: ' + bc.CEND + str(profile))\n except:\n print(' [' + bc.CRED + 'X' + bc.CEND + '] ' + bc.CYLW +\n \"\"\"No LinkedIn account found.\n\"\"\" + bc.CEND)\n return\n try:\n fname = soup.find('span', attrs={'id': 'li-profile-name'})[\n 'data-fname']\n lname = soup.find('span', attrs={'id': 'li-profile-name'})[\n 'data-lname']\n name = str(fname) + ' ' + str(lname)\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Name: ' + bc.CEND + str(fname) + ' ' + str(lname))\n except:\n name = ''\n pass\n try:\n company = soup.find('span', {'class': 'li-user-title-company'}\n ).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Company: ' + bc.CEND + str(company))\n except:\n company = ''\n pass\n try:\n title = soup.find('div', {'class': 'li-user-title'}).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Title: ' + bc.CEND + str(title))\n except:\n title = ''\n pass\n 
try:\n location = soup.find('div', {'class': 'li-user-location'}\n ).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Location: ' + bc.CEND + str(location))\n except:\n location = ''\n pass\n try:\n email = soup.find('span', {'id': 'email'}).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Email: ' + bc.CEND + str(email))\n except:\n email = ''\n pass\n self.info_dict.update({'profile': profile, 'name': name, 'location':\n location, 'company': company, 'title': title, 'email': email})\n bi.outdata['linkedin'] = self.info_dict\n print()\n return\n", "step-3": "<mask token>\ntry:\n import __builtin__ as bi\nexcept:\n import builtins as bi\n\n\nclass LinkedInGrabber(PageGrabber):\n\n def get_info(self, email):\n client = requests.Session()\n print('[' + bc.CPRP + '?' + bc.CEND + '] ' + bc.CCYN + 'LinkedIn' +\n bc.CEND)\n HOMEPAGE_URL = 'https://www.linkedin.com'\n LOGIN_URL = 'https://www.linkedin.com/uas/login-submit'\n LOGOUT_URL = 'https://www.linkedin.com/m/logout'\n source = client.get(HOMEPAGE_URL).content\n soup = self.get_dom(source)\n csrf = soup.find(id='loginCsrfParam-login')['value']\n try:\n with open('./storage/fb_login', 'r') as fbinfo:\n login_information = json.loads(fbinfo.read())\n login_information['loginCsrfParam'] = csrf\n except:\n login_information = {'session_key': '', 'session_password': '',\n 'loginCsrfParam': ''}\n pass\n if not login_information['session_key']:\n if login_information['session_password'] == '':\n print(' [' + bc.CRED + 'ATTENTION' + bc.CEND + '] ' + bc.\n CYLW +\n \"\"\"\tThis module requires authentication to use it properly.\n\tIt will store Credential pairs in plain-text.\"\"\"\n + bc.CEND)\n print(' [' + bc.CRED + 'ATTENTION' + bc.CEND + '] ' + bc.\n CYLW +\n 'This could produce a trail and identify the used account.'\n + bc.CEND)\n print()\n savecreds = raw_input(\n '[{}?{}] {}Would you like to save credentials now? {}(Y/n){}]: '\n .format(bc.CRED, bc.CEND, bc.CRED, bc.CYLW, bc.CEND))\n print()\n luser = raw_input(' [' + bc.CRED + '?' + bc.CEND + '] ' +\n bc.CYLW + 'What is your throw-away linkedin username: ' +\n bc.CEND)\n lpass = raw_input(' [' + bc.CRED + '?' 
+ bc.CEND + '] ' +\n bc.CYLW + 'What is your throw-away linkedin password: ' +\n bc.CEND)\n login_information = {'session_key': luser,\n 'session_password': lpass, 'loginCsrfParam': csrf}\n if str(savecreds).lower() in ['y', 'yes']:\n try:\n with open('./storage/fb_login', 'w') as fbinfo:\n fbinfo.write(json.dumps(login_information))\n except Exception as failedtowrite:\n print('Failed to write fbinfo to file: %s' %\n failedtowrite)\n try:\n client.post(LOGIN_URL, data=login_information)\n results = client.get(\n 'https://linkedin.com/sales/gmail/profile/viewByEmail/' +\n str(email)).text\n except Exception as failedlinkedinauth:\n print((' [' + bc.CRED + 'X' + bc.CEND + '] ' + bc.CYLW +\n 'This module did not properly authenticate: %s' + bc.CEND) %\n failedlinkedinauth)\n soup = self.get_dom(results)\n self.get_source(LOGOUT_URL)\n try:\n profile = soup.find('a', attrs={'class':\n 'li-hover-under li-txt-black-85'})['href']\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Profile: ' + bc.CEND + str(profile))\n except:\n print(' [' + bc.CRED + 'X' + bc.CEND + '] ' + bc.CYLW +\n \"\"\"No LinkedIn account found.\n\"\"\" + bc.CEND)\n return\n try:\n fname = soup.find('span', attrs={'id': 'li-profile-name'})[\n 'data-fname']\n lname = soup.find('span', attrs={'id': 'li-profile-name'})[\n 'data-lname']\n name = str(fname) + ' ' + str(lname)\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Name: ' + bc.CEND + str(fname) + ' ' + str(lname))\n except:\n name = ''\n pass\n try:\n company = soup.find('span', {'class': 'li-user-title-company'}\n ).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Company: ' + bc.CEND + str(company))\n except:\n company = ''\n pass\n try:\n title = soup.find('div', {'class': 'li-user-title'}).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Title: ' + bc.CEND + str(title))\n except:\n title = ''\n pass\n try:\n location = soup.find('div', {'class': 'li-user-location'}\n ).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Location: ' + bc.CEND + str(location))\n except:\n location = ''\n pass\n try:\n email = soup.find('span', {'id': 'email'}).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Email: ' + bc.CEND + str(email))\n except:\n email = ''\n pass\n self.info_dict.update({'profile': profile, 'name': name, 'location':\n location, 'company': company, 'title': title, 'email': email})\n bi.outdata['linkedin'] = self.info_dict\n print()\n return\n", "step-4": "from __future__ import print_function\nfrom __future__ import absolute_import\nimport requests\nfrom bs4 import BeautifulSoup\nimport logging\nfrom plugins.base import PageGrabber\nfrom plugins.colors import BodyColors as bc\nimport json\ntry:\n import __builtin__ as bi\nexcept:\n import builtins as bi\n\n\nclass LinkedInGrabber(PageGrabber):\n\n def get_info(self, email):\n client = requests.Session()\n print('[' + bc.CPRP + '?' 
+ bc.CEND + '] ' + bc.CCYN + 'LinkedIn' +\n bc.CEND)\n HOMEPAGE_URL = 'https://www.linkedin.com'\n LOGIN_URL = 'https://www.linkedin.com/uas/login-submit'\n LOGOUT_URL = 'https://www.linkedin.com/m/logout'\n source = client.get(HOMEPAGE_URL).content\n soup = self.get_dom(source)\n csrf = soup.find(id='loginCsrfParam-login')['value']\n try:\n with open('./storage/fb_login', 'r') as fbinfo:\n login_information = json.loads(fbinfo.read())\n login_information['loginCsrfParam'] = csrf\n except:\n login_information = {'session_key': '', 'session_password': '',\n 'loginCsrfParam': ''}\n pass\n if not login_information['session_key']:\n if login_information['session_password'] == '':\n print(' [' + bc.CRED + 'ATTENTION' + bc.CEND + '] ' + bc.\n CYLW +\n \"\"\"\tThis module requires authentication to use it properly.\n\tIt will store Credential pairs in plain-text.\"\"\"\n + bc.CEND)\n print(' [' + bc.CRED + 'ATTENTION' + bc.CEND + '] ' + bc.\n CYLW +\n 'This could produce a trail and identify the used account.'\n + bc.CEND)\n print()\n savecreds = raw_input(\n '[{}?{}] {}Would you like to save credentials now? {}(Y/n){}]: '\n .format(bc.CRED, bc.CEND, bc.CRED, bc.CYLW, bc.CEND))\n print()\n luser = raw_input(' [' + bc.CRED + '?' + bc.CEND + '] ' +\n bc.CYLW + 'What is your throw-away linkedin username: ' +\n bc.CEND)\n lpass = raw_input(' [' + bc.CRED + '?' + bc.CEND + '] ' +\n bc.CYLW + 'What is your throw-away linkedin password: ' +\n bc.CEND)\n login_information = {'session_key': luser,\n 'session_password': lpass, 'loginCsrfParam': csrf}\n if str(savecreds).lower() in ['y', 'yes']:\n try:\n with open('./storage/fb_login', 'w') as fbinfo:\n fbinfo.write(json.dumps(login_information))\n except Exception as failedtowrite:\n print('Failed to write fbinfo to file: %s' %\n failedtowrite)\n try:\n client.post(LOGIN_URL, data=login_information)\n results = client.get(\n 'https://linkedin.com/sales/gmail/profile/viewByEmail/' +\n str(email)).text\n except Exception as failedlinkedinauth:\n print((' [' + bc.CRED + 'X' + bc.CEND + '] ' + bc.CYLW +\n 'This module did not properly authenticate: %s' + bc.CEND) %\n failedlinkedinauth)\n soup = self.get_dom(results)\n self.get_source(LOGOUT_URL)\n try:\n profile = soup.find('a', attrs={'class':\n 'li-hover-under li-txt-black-85'})['href']\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Profile: ' + bc.CEND + str(profile))\n except:\n print(' [' + bc.CRED + 'X' + bc.CEND + '] ' + bc.CYLW +\n \"\"\"No LinkedIn account found.\n\"\"\" + bc.CEND)\n return\n try:\n fname = soup.find('span', attrs={'id': 'li-profile-name'})[\n 'data-fname']\n lname = soup.find('span', attrs={'id': 'li-profile-name'})[\n 'data-lname']\n name = str(fname) + ' ' + str(lname)\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Name: ' + bc.CEND + str(fname) + ' ' + str(lname))\n except:\n name = ''\n pass\n try:\n company = soup.find('span', {'class': 'li-user-title-company'}\n ).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Company: ' + bc.CEND + str(company))\n except:\n company = ''\n pass\n try:\n title = soup.find('div', {'class': 'li-user-title'}).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Title: ' + bc.CEND + str(title))\n except:\n title = ''\n pass\n try:\n location = soup.find('div', {'class': 'li-user-location'}\n ).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Location: ' + bc.CEND + str(location))\n except:\n location = ''\n pass\n try:\n email = soup.find('span', {'id': 
'email'}).get_text()\n print(' [' + bc.CGRN + '+' + bc.CEND + '] ' + bc.CRED +\n 'Email: ' + bc.CEND + str(email))\n except:\n email = ''\n pass\n self.info_dict.update({'profile': profile, 'name': name, 'location':\n location, 'company': company, 'title': title, 'email': email})\n bi.outdata['linkedin'] = self.info_dict\n print()\n return\n", "step-5": "from __future__ import print_function\nfrom __future__ import absolute_import\n#\n# LinkedIn Sales Module\n#\nimport requests\nfrom bs4 import BeautifulSoup\nimport logging\nfrom plugins.base import PageGrabber\nfrom plugins.colors import BodyColors as bc\nimport json\ntry:\n import __builtin__ as bi\nexcept:\n import builtins as bi\n\n\nclass LinkedInGrabber(PageGrabber): # LinkedIN.com sales scraper for email lookups\n def get_info(self,email): # Requires AUTH, login and request AUTHENTICATED pages from linkedin\n client = requests.Session() # Establish the session()\n print(\"[\"+bc.CPRP+\"?\"+bc.CEND+\"] \"+bc.CCYN + \"LinkedIn\" + bc.CEND)\n HOMEPAGE_URL = 'https://www.linkedin.com' # Set homepage for linkedin\n LOGIN_URL = 'https://www.linkedin.com/uas/login-submit' # Set login page for linkedin\n LOGOUT_URL = 'https://www.linkedin.com/m/logout'\n source = client.get(HOMEPAGE_URL).content # Request source\n soup = self.get_dom(source) # BS DOM\n csrf = soup.find(id=\"loginCsrfParam-login\")['value']\n #\n # ATTENTION:: YOU MUST POPULATE THE FOLLOWING WITH YOUR REAL CREDENTIALS\n #\n # ATTENTION:: THIS WILL NOT WORK PROPRLY OTHERWISE\n #\n # session_key = email session_password = your password\n #\n try:\n with open('./storage/fb_login', 'r') as fbinfo:\n login_information = json.loads(fbinfo.read())\n #print(json.loads(login_information))\n login_information['loginCsrfParam'] = csrf\n except:\n login_information = {\n 'session_key':'',\n 'session_password':'',\n 'loginCsrfParam': '',\n }\n pass\n if not login_information['session_key']:\n if login_information['session_password'] == '': # If no modifications of default u/p, print error, return\n print (\" [\"+bc.CRED+\"ATTENTION\"+bc.CEND+\"] \" + \\\n bc.CYLW+\"\\tThis module requires authentication to use it properly.\\n\\tIt will store Credential pairs in plain-text.\"+bc.CEND)\n print (\" [\"+bc.CRED+\"ATTENTION\"+bc.CEND+\"] \" + \\\n bc.CYLW + \"This could produce a trail and identify the used account.\"+bc.CEND)\n print()\n savecreds = raw_input(\"[{}?{}] {}Would you like to save credentials now? 
{}(Y/n){}]: \".format(bc.CRED,bc.CEND,bc.CRED,bc.CYLW,bc.CEND))\n print()\n luser = raw_input(\" [\"+bc.CRED+\"?\"+bc.CEND+\"] \" + \\\n bc.CYLW+\"What is your throw-away linkedin username: \"+bc.CEND)\n lpass = raw_input(\" [\"+bc.CRED+\"?\"+bc.CEND+\"] \" + \\\n bc.CYLW+\"What is your throw-away linkedin password: \"+bc.CEND)\n login_information = {\n 'session_key':luser,\n 'session_password':lpass,\n 'loginCsrfParam': csrf,\n }\n if str(savecreds).lower() in ['y','yes']:\n try:\n with open('./storage/fb_login','w') as fbinfo:\n fbinfo.write(json.dumps(login_information))\n except Exception as failedtowrite:\n print((\"Failed to write fbinfo to file: %s\") % failedtowrite)\n try:\n client.post(LOGIN_URL, data=login_information)\n results = client.get('https://linkedin.com/sales/gmail/profile/viewByEmail/'+str(email)).text\n except Exception as failedlinkedinauth:\n print((\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \" + \\\n bc.CYLW+\"This module did not properly authenticate: %s\" + \\\n bc.CEND) % failedlinkedinauth)\n soup = self.get_dom(results)\n self.get_source(LOGOUT_URL) # Log out of LinkedIn, kills sessionID\n try: # Search and set from results\n profile = soup.find('a',attrs={'class': 'li-hover-under li-txt-black-85'})['href']\n print(\" [\"+bc.CGRN+\"+\"+bc.CEND+\"] \"+ \\\n bc.CRED+\"Profile: \"+bc.CEND + \\\n str(profile)\n )\n except:\n print(\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \" + \\\n bc.CYLW+\"No LinkedIn account found.\\n\" + \\\n bc.CEND\n )\n return\n try:\n fname = soup.find('span',attrs={'id': 'li-profile-name'})['data-fname']\n lname = soup.find('span',attrs={'id': 'li-profile-name'})['data-lname']\n name = str(fname) + \" \" + str(lname)\n print(\" [\"+bc.CGRN+\"+\"+bc.CEND+\"] \" + \\\n bc.CRED+\"Name: \" + \\\n bc.CEND+ str(fname) + \\\n \" \" + \\\n str(lname)\n )\n except:\n name = \"\"\n pass # print (\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \"+bc.CYLW+\"No username can be found.\\n\"+bc.CEND)\n try:\n company = soup.find('span',{'class': 'li-user-title-company'}).get_text()\n print(\" [\"+bc.CGRN+\"+\"+bc.CEND+\"] \" + \\\n bc.CRED+\"Company: \" + \\\n bc.CEND + str(company)\n )\n except:\n company = \"\"\n pass # print (\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \"+bc.CYLW+\"No Company can be found.\\n\"+bc.CEND)\n try:\n title = soup.find('div',{'class':'li-user-title'}).get_text()\n print(\" [\"+bc.CGRN+\"+\"+bc.CEND+\"] \" + \\\n bc.CRED+\"Title: \" + \\\n bc.CEND+\\\n str(title)\n )\n except:\n title = \"\"\n pass #print (\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \"+bc.CYLW+\"No Job Title can be found.\\n\"+bc.CEND)\n try:\n location = soup.find('div', {'class':'li-user-location'}).get_text()\n print(\" [\"+bc.CGRN+\"+\"+bc.CEND+\"] \"+bc.CRED+\"Location: \"+bc.CEND+ str(location))\n except:\n location = \"\"\n pass #print (\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \"+bc.CYLW+\"No Location can be found.\\n\"+bc.CEND)\n try:\n email = soup.find('span', {'id':'email'}).get_text()\n print(\" [\"+bc.CGRN+\"+\"+bc.CEND+\"] \"+bc.CRED+\"Email: \"+bc.CEND+ str(email))\n except:\n email =\"\"\n pass #print (\" [\"+bc.CRED+\"X\"+bc.CEND+\"] \"+bc.CYLW+\"No Email account found.\\n\"+bc.CEND)\n self.info_dict.update({\n \"profile\": profile,\n \"name\": name,\n \"location\": location,\n \"company\": company,\n \"title\":title,\n \"email\":email\n })\n bi.outdata['linkedin'] = self.info_dict\n print()\n return\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
#!/usr/bin/env python
# coding: utf-8

import sys
sys.path.insert(0, "/code/huggingface/transformers-fair-wmt/src")

import logging
logging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere

from transformers.tokenization_fsmt import FSMTTokenizer
from transformers.modeling_fsmt import FSMTForConditionalGeneration

def translate(src, tgt, text):
    # to switch to local model
    #mname = "/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}"
    # s3 uploaded model
    mname = f"stas/wmt19-{src}-{tgt}"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)

    encoded = tokenizer.encode(text, return_tensors='pt')
    # print(encoded)

    output = model.generate(encoded, num_beams=5, early_stopping=True)[0]
    # print(output)

    decoded = tokenizer.decode(output, skip_special_tokens=True)
    #print(decoded)
    return decoded

def paraphrase(src, tgt, text):
    return translate(tgt, src, translate(src, tgt, text))

#text = """Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now."""

text = "Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?"

en_ru = paraphrase('en', 'ru', text)
en_de = paraphrase('en', 'de', text)
# print together to avoid the logger noise :(
print("Paraphrasing:")
print(f"en : {text}")
print(f"en-ru-en: {en_ru}")
print(f"en-de-en: {en_de}")

# Paraphrasing:
# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?
# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?
# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?
normal
{ "blob_id": "7864138459caf469a0148420718b2282598141de", "index": 6674, "step-1": "<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\n<mask token>\n", "step-2": "<mask token>\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\n<mask token>\nlogging.disable(logging.INFO)\n<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\n<mask token>\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n", "step-3": "<mask token>\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\n<mask token>\nlogging.disable(logging.INFO)\n<mask token>\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\ntext = (\n 'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'\n )\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n", "step-4": "import sys\nsys.path.insert(0, '/code/huggingface/transformers-fair-wmt/src')\nimport logging\nlogging.disable(logging.INFO)\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\n\n\ndef translate(src, tgt, text):\n mname = f'stas/wmt19-{src}-{tgt}'\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n encoded = tokenizer.encode(text, return_tensors='pt')\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n return decoded\n\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n\ntext = (\n 'Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?'\n )\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\nprint('Paraphrasing:')\nprint(f'en : {text}')\nprint(f'en-ru-en: {en_ru}')\nprint(f'en-de-en: {en_de}')\n", "step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nsys.path.insert(0, 
\"/code/huggingface/transformers-fair-wmt/src\")\n\nimport logging\nlogging.disable(logging.INFO) # disable INFO and DEBUG logger everywhere\n\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\n\ndef translate(src, tgt, text):\n # to switch to local model\n #mname = \"/code/huggingface/transformers-fair-wmt/data/wmt19-{src}-{tgt}\"\n # s3 uploaded model\n mname = f\"stas/wmt19-{src}-{tgt}\"\n tokenizer = FSMTTokenizer.from_pretrained(mname)\n model = FSMTForConditionalGeneration.from_pretrained(mname)\n\n encoded = tokenizer.encode(text, return_tensors='pt')\n # print(encoded)\n\n output = model.generate(encoded, num_beams=5, early_stopping=True)[0]\n # print(output)\n\n decoded = tokenizer.decode(output, skip_special_tokens=True)\n #print(decoded)\n return decoded\n\ndef paraphrase(src, tgt, text):\n return translate(tgt, src, translate(src, tgt, text))\n\n#text = \"\"\"Here's a little song I wrote. You might want to sing it note for note. Don't worry, be happy. In every life we have some trouble. But when you worry you make it double. Don't worry, be happy. Don't worry, be happy now.\"\"\"\n\ntext = \"Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?\"\n\nen_ru = paraphrase('en', 'ru', text)\nen_de = paraphrase('en', 'de', text)\n# print together to avoid the logger noise :(\nprint(\"Paraphrasing:\")\nprint(f\"en : {text}\")\nprint(f\"en-ru-en: {en_ru}\")\nprint(f\"en-de-en: {en_de}\")\n\n# Paraphrasing:\n# en : Every morning when I wake up, I experience an exquisite joy - the joy of being Salvador Dalí - and I ask myself in rapture: What wonderful things is this Salvador Dalí going to accomplish today?\n# en-ru-en: Every morning when I wake up, I have a delightful joy - the joy of being Salvador Dali - and I ask myself in delight: What wonderful things is this Salvador Dali going to do today?\n# en-de-en: Every morning when I wake up, I experience an extraordinary joy - the joy of being Salvador Dalí - and I wonder with delight: what wonderful things will this Salvador Dalí do today?\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
<|reserved_special_token_0|> class BuildDataset(data.Dataset): <|reserved_special_token_0|> def __init__(self, imgs_path, labels, extra_info=None, transform=None): """ The constructor gets the images path and their respectively labels and extra information (if it exists). In addition, you can specify some transform operation to be carry out on the images. It's important to note the images must match with the labels (an extra information if exist). For example, the imgs_path[x]'s label must take place on labels[x]. Parameters: :param imgs_path (list): a list of string containing the image paths :param labels (list) a list of labels for each image :param extra_info (list): a list of extra information regarding each image. If None, there is no information. Defaul is None. :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images """ self.imgs_path = imgs_path self.labels = labels self.extra_info = extra_info if transform is not None: self.transform = transform else: self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]) def __len__(self): """ This method just returns the dataset size """ return len(self.imgs_path) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BuildDataset(data.Dataset): """ This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset class and implement the following methods: __len__, __getitem__ and the constructor __init__ """ def __init__(self, imgs_path, labels, extra_info=None, transform=None): """ The constructor gets the images path and their respectively labels and extra information (if it exists). In addition, you can specify some transform operation to be carry out on the images. It's important to note the images must match with the labels (an extra information if exist). For example, the imgs_path[x]'s label must take place on labels[x]. Parameters: :param imgs_path (list): a list of string containing the image paths :param labels (list) a list of labels for each image :param extra_info (list): a list of extra information regarding each image. If None, there is no information. Defaul is None. :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images """ self.imgs_path = imgs_path self.labels = labels self.extra_info = extra_info if transform is not None: self.transform = transform else: self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]) def __len__(self): """ This method just returns the dataset size """ return len(self.imgs_path) def __getitem__(self, item): """ It gets the image, labels and extra information (if it exists) according to the index informed in `item`. It also performs the transform on the image. 
:param item (int): an index in the interval [0, ..., len(img_paths)-1] :return (tuple): a tuple containing the image, its label and extra information (if it exists) """ image = Image.open(self.imgs_path[item]).convert('RGB') image = self.transform(image) img_name = self.imgs_path[item].split('/')[-1].split('.')[0] if self.extra_info is None: extra_info = [] else: extra_info = self.extra_info[item] if self.labels is None: labels = [] else: labels = self.labels[item] return image, labels, extra_info, img_name <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BuildDataset(data.Dataset): """ This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset class and implement the following methods: __len__, __getitem__ and the constructor __init__ """ def __init__(self, imgs_path, labels, extra_info=None, transform=None): """ The constructor gets the images path and their respectively labels and extra information (if it exists). In addition, you can specify some transform operation to be carry out on the images. It's important to note the images must match with the labels (an extra information if exist). For example, the imgs_path[x]'s label must take place on labels[x]. Parameters: :param imgs_path (list): a list of string containing the image paths :param labels (list) a list of labels for each image :param extra_info (list): a list of extra information regarding each image. If None, there is no information. Defaul is None. :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images """ self.imgs_path = imgs_path self.labels = labels self.extra_info = extra_info if transform is not None: self.transform = transform else: self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]) def __len__(self): """ This method just returns the dataset size """ return len(self.imgs_path) def __getitem__(self, item): """ It gets the image, labels and extra information (if it exists) according to the index informed in `item`. It also performs the transform on the image. :param item (int): an index in the interval [0, ..., len(img_paths)-1] :return (tuple): a tuple containing the image, its label and extra information (if it exists) """ image = Image.open(self.imgs_path[item]).convert('RGB') image = self.transform(image) img_name = self.imgs_path[item].split('/')[-1].split('.')[0] if self.extra_info is None: extra_info = [] else: extra_info = self.extra_info[item] if self.labels is None: labels = [] else: labels = self.labels[item] return image, labels, extra_info, img_name def get_data_loader(imgs_path, labels, extra_info=None, transform=None, params=None): """ This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader for these files. You also can set some transformations using torchvision.transforms in order to perform data augmentation. Lastly, params is a dictionary that you can set the following parameters: batch_size (int): the batch size for the dataset. If it's not informed the default is 30 shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True num_workers (int): the number thread in CPU to load the dataset. 
If it's not informed the default is 0 (which :param imgs_path (list): a list of string containing the images path :param labels (list): a list of labels for each image :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's no extra information. Default is None :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the augmentation. If it's None, none augmentation will be perform. Default is None :param params (dictionary, optional): this dictionary contains the following parameters: batch_size: the batch size. If the key is not informed or params = None, the default value will be 30 shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None, the default value will be True num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default value will be 4 pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None, the default value will be True :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params """ dt = BuildDataset(imgs_path, labels, extra_info, transform) batch_size = 30 shuf = True num_workers = 4 pin_memory = True if params is not None: if 'batch_size' in params.keys(): batch_size = params['batch_size'] if 'shuf' in params.keys(): shuf = params['shuf'] if 'num_workers' in params.keys(): num_workers = params['num_workers'] if 'pin_memory' in params.keys(): pin_memory = params['pin_memory'] dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers, pin_memory=pin_memory) return dl <|reserved_special_token_1|> <|reserved_special_token_0|> from PIL import Image from torch.utils import data import torchvision.transforms as transforms class BuildDataset(data.Dataset): """ This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset class and implement the following methods: __len__, __getitem__ and the constructor __init__ """ def __init__(self, imgs_path, labels, extra_info=None, transform=None): """ The constructor gets the images path and their respectively labels and extra information (if it exists). In addition, you can specify some transform operation to be carry out on the images. It's important to note the images must match with the labels (an extra information if exist). For example, the imgs_path[x]'s label must take place on labels[x]. Parameters: :param imgs_path (list): a list of string containing the image paths :param labels (list) a list of labels for each image :param extra_info (list): a list of extra information regarding each image. If None, there is no information. Defaul is None. :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images """ self.imgs_path = imgs_path self.labels = labels self.extra_info = extra_info if transform is not None: self.transform = transform else: self.transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]) def __len__(self): """ This method just returns the dataset size """ return len(self.imgs_path) def __getitem__(self, item): """ It gets the image, labels and extra information (if it exists) according to the index informed in `item`. It also performs the transform on the image. 
:param item (int): an index in the interval [0, ..., len(img_paths)-1] :return (tuple): a tuple containing the image, its label and extra information (if it exists) """ image = Image.open(self.imgs_path[item]).convert('RGB') image = self.transform(image) img_name = self.imgs_path[item].split('/')[-1].split('.')[0] if self.extra_info is None: extra_info = [] else: extra_info = self.extra_info[item] if self.labels is None: labels = [] else: labels = self.labels[item] return image, labels, extra_info, img_name def get_data_loader(imgs_path, labels, extra_info=None, transform=None, params=None): """ This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader for these files. You also can set some transformations using torchvision.transforms in order to perform data augmentation. Lastly, params is a dictionary that you can set the following parameters: batch_size (int): the batch size for the dataset. If it's not informed the default is 30 shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which :param imgs_path (list): a list of string containing the images path :param labels (list): a list of labels for each image :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's no extra information. Default is None :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the augmentation. If it's None, none augmentation will be perform. Default is None :param params (dictionary, optional): this dictionary contains the following parameters: batch_size: the batch size. If the key is not informed or params = None, the default value will be 30 shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None, the default value will be True num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default value will be 4 pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None, the default value will be True :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params """ dt = BuildDataset(imgs_path, labels, extra_info, transform) batch_size = 30 shuf = True num_workers = 4 pin_memory = True if params is not None: if 'batch_size' in params.keys(): batch_size = params['batch_size'] if 'shuf' in params.keys(): shuf = params['shuf'] if 'num_workers' in params.keys(): num_workers = params['num_workers'] if 'pin_memory' in params.keys(): pin_memory = params['pin_memory'] dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers, pin_memory=pin_memory) return dl <|reserved_special_token_1|> #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Author: André Pacheco E-mail: [email protected] This file implements the methods and functions to load the image as a PyTorch dataset If you find any bug or have some suggestion, please, email me. """ from PIL import Image from torch.utils import data import torchvision.transforms as transforms class BuildDataset (data.Dataset): """ This the standard way to implement a dataset pipeline in PyTorch. 
We need to extend the torch.utils.data.Dataset class and implement the following methods: __len__, __getitem__ and the constructor __init__ """ def __init__(self, imgs_path, labels, extra_info=None, transform=None): """ The constructor gets the images path and their respectively labels and extra information (if it exists). In addition, you can specify some transform operation to be carry out on the images. It's important to note the images must match with the labels (an extra information if exist). For example, the imgs_path[x]'s label must take place on labels[x]. Parameters: :param imgs_path (list): a list of string containing the image paths :param labels (list) a list of labels for each image :param extra_info (list): a list of extra information regarding each image. If None, there is no information. Defaul is None. :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images """ self.imgs_path = imgs_path self.labels = labels self.extra_info = extra_info # if transform is None, we need to ensure that the PIL image will be transformed to tensor, otherwise we'll got # an exception if (transform is not None): self.transform = transform else: self.transform = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor() ]) def __len__(self): """ This method just returns the dataset size """ return len(self.imgs_path) def __getitem__(self, item): """ It gets the image, labels and extra information (if it exists) according to the index informed in `item`. It also performs the transform on the image. :param item (int): an index in the interval [0, ..., len(img_paths)-1] :return (tuple): a tuple containing the image, its label and extra information (if it exists) """ image = Image.open(self.imgs_path[item]).convert("RGB") # Applying the transformations image = self.transform(image) img_name = self.imgs_path[item].split('/')[-1].split('.')[0] # print(self.labels[item]) # print(self.extra_info[item]) if self.extra_info is None: extra_info = [] else: extra_info = self.extra_info[item] if self.labels is None: labels = [] else: labels = self.labels[item] return image, labels, extra_info, img_name def get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None): """ This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader for these files. You also can set some transformations using torchvision.transforms in order to perform data augmentation. Lastly, params is a dictionary that you can set the following parameters: batch_size (int): the batch size for the dataset. If it's not informed the default is 30 shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which :param imgs_path (list): a list of string containing the images path :param labels (list): a list of labels for each image :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's no extra information. Default is None :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the augmentation. If it's None, none augmentation will be perform. 
Default is None :param params (dictionary, optional): this dictionary contains the following parameters: batch_size: the batch size. If the key is not informed or params = None, the default value will be 30 shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None, the default value will be True num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default value will be 4 pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None, the default value will be True :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params """ dt = BuildDataset(imgs_path, labels, extra_info, transform) # Checking the params values. If it's not defined in params of if params is None, the default values are described # below: batch_size = 30 shuf = True num_workers = 4 pin_memory = True # However, if the params is defined, we used the values described on it: if (params is not None): if ('batch_size' in params.keys()): batch_size = params['batch_size'] if ('shuf' in params.keys()): shuf = params['shuf'] if ('num_workers' in params.keys()): num_workers = params['num_workers'] if ('pin_memory' in params.keys()): pin_memory = params['pin_memory'] # Calling the dataloader dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers, pin_memory=pin_memory) return dl
flexible
{ "blob_id": "4e31c2a80bec77a1f5aafc8a91617fb4b2941788", "index": 432, "step-1": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n <mask token>\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader(imgs_path, labels, extra_info=None, transform=None,\n params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. 
If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n if params is not None:\n if 'batch_size' in params.keys():\n batch_size = params['batch_size']\n if 'shuf' in params.keys():\n shuf = params['shuf']\n if 'num_workers' in params.keys():\n num_workers = params['num_workers']\n if 'pin_memory' in params.keys():\n pin_memory = params['pin_memory']\n dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf,\n num_workers=num_workers, pin_memory=pin_memory)\n return dl\n", "step-4": "<mask token>\nfrom PIL import Image\nfrom torch.utils import data\nimport torchvision.transforms as transforms\n\n\nclass BuildDataset(data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n if transform is not None:\n self.transform = transform\n else:\n self.transform = transforms.Compose([transforms.Resize((224, \n 224)), transforms.ToTensor()])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n image = Image.open(self.imgs_path[item]).convert('RGB')\n image = self.transform(image)\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader(imgs_path, labels, extra_info=None, transform=None,\n params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. 
If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n if params is not None:\n if 'batch_size' in params.keys():\n batch_size = params['batch_size']\n if 'shuf' in params.keys():\n shuf = params['shuf']\n if 'num_workers' in params.keys():\n num_workers = params['num_workers']\n if 'pin_memory' in params.keys():\n pin_memory = params['pin_memory']\n dl = data.DataLoader(dataset=dt, batch_size=batch_size, shuffle=shuf,\n num_workers=num_workers, pin_memory=pin_memory)\n return dl\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAuthor: André Pacheco\nE-mail: [email protected]\n\nThis file implements the methods and functions to load the image as a PyTorch dataset\n\nIf you find any bug or have some suggestion, please, email me.\n\"\"\"\n\nfrom PIL import Image\nfrom torch.utils import data\nimport torchvision.transforms as transforms\n\n\nclass BuildDataset (data.Dataset):\n \"\"\"\n This the standard way to implement a dataset pipeline in PyTorch. We need to extend the torch.utils.data.Dataset\n class and implement the following methods: __len__, __getitem__ and the constructor __init__\n \"\"\"\n\n def __init__(self, imgs_path, labels, extra_info=None, transform=None):\n \"\"\"\n The constructor gets the images path and their respectively labels and extra information (if it exists).\n In addition, you can specify some transform operation to be carry out on the images.\n\n It's important to note the images must match with the labels (an extra information if exist). For example, the\n imgs_path[x]'s label must take place on labels[x].\n\n Parameters:\n :param imgs_path (list): a list of string containing the image paths\n :param labels (list) a list of labels for each image\n :param extra_info (list): a list of extra information regarding each image. 
If None, there is no information.\n Defaul is None.\n :param transform (torchvision.transforms.transforms.Compose): transform operations to be carry out on the images\n \"\"\"\n\n self.imgs_path = imgs_path\n self.labels = labels\n self.extra_info = extra_info\n\n # if transform is None, we need to ensure that the PIL image will be transformed to tensor, otherwise we'll got\n # an exception\n if (transform is not None):\n self.transform = transform\n else:\n self.transform = transforms.Compose([\n transforms.Resize((224,224)),\n transforms.ToTensor()\n ])\n\n def __len__(self):\n \"\"\" This method just returns the dataset size \"\"\"\n return len(self.imgs_path)\n\n def __getitem__(self, item):\n \"\"\"\n It gets the image, labels and extra information (if it exists) according to the index informed in `item`.\n It also performs the transform on the image.\n\n :param item (int): an index in the interval [0, ..., len(img_paths)-1]\n :return (tuple): a tuple containing the image, its label and extra information (if it exists)\n \"\"\"\n\n image = Image.open(self.imgs_path[item]).convert(\"RGB\")\n\n # Applying the transformations\n image = self.transform(image)\n\n img_name = self.imgs_path[item].split('/')[-1].split('.')[0]\n # print(self.labels[item])\n # print(self.extra_info[item])\n\n if self.extra_info is None:\n extra_info = []\n else:\n extra_info = self.extra_info[item]\n\n if self.labels is None:\n labels = []\n else:\n labels = self.labels[item]\n\n return image, labels, extra_info, img_name\n\n\ndef get_data_loader (imgs_path, labels, extra_info=None, transform=None, params=None):\n \"\"\"\n This function gets a list og images path, their labels and extra information (if it exists) and returns a DataLoader\n for these files. You also can set some transformations using torchvision.transforms in order to perform data\n augmentation. Lastly, params is a dictionary that you can set the following parameters:\n batch_size (int): the batch size for the dataset. If it's not informed the default is 30\n shuf (bool): set it true if wanna shuffe the dataset. If it's not informed the default is True\n num_workers (int): the number thread in CPU to load the dataset. If it's not informed the default is 0 (which\n\n\n :param imgs_path (list): a list of string containing the images path\n :param labels (list): a list of labels for each image\n :param extra_info (list, optional): a list of extra information regarding each image. If it's None, it means there's\n no extra information. Default is None\n :param transform (torchvision.transforms, optional): use the torchvision.transforms.compose to perform the data\n augmentation for the dataset. Alternatively, you can use the jedy.pytorch.utils.augmentation to perform the\n augmentation. If it's None, none augmentation will be perform. Default is None\n :param params (dictionary, optional): this dictionary contains the following parameters:\n batch_size: the batch size. If the key is not informed or params = None, the default value will be 30\n shuf: if you'd like to shuffle the dataset. If the key is not informed or params = None,\n the default value will be True\n num_workers: the number of threads to be used in CPU. If the key is not informed or params = None, the default\n value will be 4\n pin_memory = set it to True to Pytorch preload the images on GPU. 
If the key is not informed or params = None,\n the default value will be True\n :return (torch.utils.data.DataLoader): a dataloader with the dataset and the chose params\n \"\"\"\n\n\n dt = BuildDataset(imgs_path, labels, extra_info, transform)\n\n # Checking the params values. If it's not defined in params of if params is None, the default values are described\n # below:\n batch_size = 30\n shuf = True\n num_workers = 4\n pin_memory = True\n\n # However, if the params is defined, we used the values described on it:\n if (params is not None):\n if ('batch_size' in params.keys()):\n batch_size = params['batch_size']\n if ('shuf' in params.keys()):\n shuf = params['shuf']\n if ('num_workers' in params.keys()):\n num_workers = params['num_workers']\n if ('pin_memory' in params.keys()):\n pin_memory = params['pin_memory']\n\n # Calling the dataloader\n dl = data.DataLoader (dataset=dt, batch_size=batch_size, shuffle=shuf, num_workers=num_workers,\n pin_memory=pin_memory)\n\n return dl\n\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
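For reference, a minimal usage sketch of the BuildDataset / get_data_loader module listed in the entry above. This sketch is not part of the dataset row: the module file name (build_dataset.py), the image paths and the labels are placeholder assumptions.

# Hedged usage sketch: assumes the module above is saved locally as build_dataset.py
# and that the listed image files exist on disk; paths and labels are placeholders.
import torchvision.transforms as transforms
from build_dataset import get_data_loader  # hypothetical module name

imgs_path = ['imgs/sample_0.jpg', 'imgs/sample_1.jpg', 'imgs/sample_2.jpg']  # placeholders
labels = [0, 1, 0]                                                           # placeholders

# Optional augmentation; with transform=None, BuildDataset falls back to
# Resize((224, 224)) + ToTensor() as defined in its constructor.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

params = {'batch_size': 2, 'shuf': True, 'num_workers': 0, 'pin_memory': False}
loader = get_data_loader(imgs_path, labels, extra_info=None, transform=transform, params=params)

# Each batch mirrors __getitem__: (image, labels, extra_info, img_name).
for image, label, extra_info, img_name in loader:
    print(image.shape, label, img_name)
    break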
<|reserved_special_token_0|> class trinet(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def generate_image_left(self, img, disp): return bilinear_sampler_1d_h(img, -disp) def generate_image_right(self, img, disp): return bilinear_sampler_1d_h(img, disp) def SSIM(self, x, y): C1 = 0.01 ** 2 C2 = 0.03 ** 2 mu_x = slim.avg_pool2d(x, 3, 1, 'VALID') mu_y = slim.avg_pool2d(y, 3, 1, 'VALID') sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2 sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2 sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2) SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2) SSIM = SSIM_n / SSIM_d return tf.clip_by_value((1 - SSIM) / 2, 0, 1) <|reserved_special_token_0|> def build_model(self, net): with tf.variable_scope('model', reuse=self.reuse_variables) as scope: self.left_pyramid = self.scale_pyramid(self.left, 4) self.right_pyramid = self.scale_pyramid(self.right, 4) self.central_pyramid = self.scale_pyramid(self.central, 4) with tf.variable_scope('shared-encoder'): features_cr = self.build_encoder(self.central, model_name=net) features_cl = features_cr with tf.variable_scope('encoder-C2R'): self.disp_c2r = self.build_decoder(features_cr, model_name=net) with tf.variable_scope('encoder-C2L'): self.disp_c2l = self.build_decoder(features_cl, model_name=net) def build_encoder(self, model_input, model_name='vgg'): with tf.variable_scope('encoder'): if model_name == 'vgg': conv1 = conv_block(model_input, 32, 7) conv2 = conv_block(conv1, 64, 5) conv3 = conv_block(conv2, 128, 3) conv4 = conv_block(conv3, 256, 3) conv5 = conv_block(conv4, 512, 3) conv6 = conv_block(conv5, 512, 3) conv7 = conv_block(conv6, 512, 3) return conv7, conv1, conv2, conv3, conv4, conv5, conv6 elif model_name == 'resnet50': conv1 = conv(model_input, 64, 7, 2) pool1 = maxpool(conv1, 3) conv2 = resblock(pool1, 64, 3) conv3 = resblock(conv2, 128, 4) conv4 = resblock(conv3, 256, 6) conv5 = resblock(conv4, 512, 3) return conv5, conv1, pool1, conv2, conv3, conv4 def build_decoder(self, skip, model_name='vgg'): with tf.variable_scope('decoder'): if model_name == 'vgg': upconv7 = upconv(skip[0], 512, 3, 2) concat7 = tf.concat([upconv7, skip[6]], 3) iconv7 = conv(concat7, 512, 3, 1) upconv6 = upconv(iconv7, 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 = tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 1) disp1 = get_disp(iconv1) elif model_name == 'resnet50': upconv6 = upconv(skip[0], 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 
= tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 1) disp1 = get_disp(iconv1) return disp1, disp2, disp3, disp4 <|reserved_special_token_0|> def build_losses(self): with tf.variable_scope('losses', reuse=self.reuse_variables): self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left] self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[ i]) for i in range(4)] self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right] self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl] self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr] self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid [i]) for i in range(4)] self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left] self.ssim_right = [self.SSIM(self.right_est[i], self. right_pyramid[i]) for i in range(4)] self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right] self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[ i]) for i in range(4)] self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl] self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[ i]) for i in range(4)] self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr] self.image_loss_right = [(self.params.alpha_image_loss * self. ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i]) for i in range(4)] self.image_loss_left = [(self.params.alpha_image_loss * self. ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i]) for i in range(4)] self.image_loss_cl = [(self.params.alpha_image_loss * self. ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self .l1_reconstruction_loss_cl[i]) for i in range(4)] self.image_loss_cr = [(self.params.alpha_image_loss * self. ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self .l1_reconstruction_loss_cr[i]) for i in range(4)] self.image_loss = tf.add_n(self.image_loss_left + self. image_loss_cl + self.image_loss_right + self.image_loss_cr) self.image_loss_L = tf.add_n(self.image_loss_left + self. image_loss_cl) self.image_loss_R = tf.add_n(self.image_loss_right + self. image_loss_cr) self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self. disp_lc_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self. disp_cl_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self. disp_rc_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self. disp_cr_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self. disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss) self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self. 
disp_cl_loss) self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self. disp_cr_loss) self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)] self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)] self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)] self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)] self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss) self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss) self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss) self.central_disparity_dif = [tf.reduce_mean(tf.abs(self. disp_cl[i] - self.disp_cr[i])) for i in range(4)] self.central_disparity_loss = tf.add_n(self.central_disparity_dif) self.total_loss = (self.image_loss + self.params. disp_gradient_loss_weight * self.disp_gradient_loss + self. params.lr_loss_weight * self.lr_loss + self. central_disparity_loss) self.total_loss_L = (self.image_loss_L + self.params. disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L) self.total_loss_R = (self.image_loss_R + self.params. disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class trinet(object): def __init__(self, params, mode, left, central, right, reuse_variables= None, model_index=0, net='vgg'): self.params = params self.mode = mode self.model_collection = ['model_0'] self.left = left self.right = right self.central = central self.reuse_variables = reuse_variables self.model_index = model_index self.build_model(net) self.build_outputs() if self.mode == 'test': return self.build_losses() self.build_summaries() def gradient_x(self, img): gx = img[:, :, :-1, :] - img[:, :, 1:, :] return gx def gradient_y(self, img): gy = img[:, :-1, :, :] - img[:, 1:, :, :] return gy def scale_pyramid(self, img, num_scales): scaled_imgs = [img] s = tf.shape(img) h = s[1] w = s[2] for i in range(num_scales - 1): ratio = 2 ** (i + 1) nh = h // ratio nw = w // ratio scaled_imgs.append(tf.image.resize_area(img, [nh, nw])) return scaled_imgs def generate_image_left(self, img, disp): return bilinear_sampler_1d_h(img, -disp) def generate_image_right(self, img, disp): return bilinear_sampler_1d_h(img, disp) def SSIM(self, x, y): C1 = 0.01 ** 2 C2 = 0.03 ** 2 mu_x = slim.avg_pool2d(x, 3, 1, 'VALID') mu_y = slim.avg_pool2d(y, 3, 1, 'VALID') sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2 sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2 sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2) SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2) SSIM = SSIM_n / SSIM_d return tf.clip_by_value((1 - SSIM) / 2, 0, 1) <|reserved_special_token_0|> def build_model(self, net): with tf.variable_scope('model', reuse=self.reuse_variables) as scope: self.left_pyramid = self.scale_pyramid(self.left, 4) self.right_pyramid = self.scale_pyramid(self.right, 4) self.central_pyramid = self.scale_pyramid(self.central, 4) with tf.variable_scope('shared-encoder'): features_cr = self.build_encoder(self.central, model_name=net) features_cl = features_cr with tf.variable_scope('encoder-C2R'): self.disp_c2r = self.build_decoder(features_cr, model_name=net) with 
tf.variable_scope('encoder-C2L'): self.disp_c2l = self.build_decoder(features_cl, model_name=net) def build_encoder(self, model_input, model_name='vgg'): with tf.variable_scope('encoder'): if model_name == 'vgg': conv1 = conv_block(model_input, 32, 7) conv2 = conv_block(conv1, 64, 5) conv3 = conv_block(conv2, 128, 3) conv4 = conv_block(conv3, 256, 3) conv5 = conv_block(conv4, 512, 3) conv6 = conv_block(conv5, 512, 3) conv7 = conv_block(conv6, 512, 3) return conv7, conv1, conv2, conv3, conv4, conv5, conv6 elif model_name == 'resnet50': conv1 = conv(model_input, 64, 7, 2) pool1 = maxpool(conv1, 3) conv2 = resblock(pool1, 64, 3) conv3 = resblock(conv2, 128, 4) conv4 = resblock(conv3, 256, 6) conv5 = resblock(conv4, 512, 3) return conv5, conv1, pool1, conv2, conv3, conv4 def build_decoder(self, skip, model_name='vgg'): with tf.variable_scope('decoder'): if model_name == 'vgg': upconv7 = upconv(skip[0], 512, 3, 2) concat7 = tf.concat([upconv7, skip[6]], 3) iconv7 = conv(concat7, 512, 3, 1) upconv6 = upconv(iconv7, 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 = tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 1) disp1 = get_disp(iconv1) elif model_name == 'resnet50': upconv6 = upconv(skip[0], 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 = tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 1) disp1 = get_disp(iconv1) return disp1, disp2, disp3, disp4 def build_outputs(self): with tf.variable_scope('disparities'): self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self. disp_c2l] self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self. disp_c2l] self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self. disp_c2r] self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self. 
disp_c2r] with tf.variable_scope('images'): self.left_est = [self.generate_image_left(self.central_pyramid[ i], self.disp_lc[i]) for i in range(4)] self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)] self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)] self.right_est = [self.generate_image_right(self. central_pyramid[i], self.disp_rc[i]) for i in range(4)] with tf.variable_scope('left-right'): self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)] self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)] self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)] self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)] with tf.variable_scope('smoothness'): self.disp_lc_smoothness = self.get_disparity_smoothness(self. disp_lc, self.left_pyramid) self.disp_cl_smoothness = self.get_disparity_smoothness(self. disp_cl, self.central_pyramid) self.disp_cr_smoothness = self.get_disparity_smoothness(self. disp_cr, self.central_pyramid) self.disp_rc_smoothness = self.get_disparity_smoothness(self. disp_rc, self.right_pyramid) def build_losses(self): with tf.variable_scope('losses', reuse=self.reuse_variables): self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left] self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[ i]) for i in range(4)] self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right] self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl] self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr] self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid [i]) for i in range(4)] self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left] self.ssim_right = [self.SSIM(self.right_est[i], self. right_pyramid[i]) for i in range(4)] self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right] self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[ i]) for i in range(4)] self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl] self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[ i]) for i in range(4)] self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr] self.image_loss_right = [(self.params.alpha_image_loss * self. ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i]) for i in range(4)] self.image_loss_left = [(self.params.alpha_image_loss * self. ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i]) for i in range(4)] self.image_loss_cl = [(self.params.alpha_image_loss * self. ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self .l1_reconstruction_loss_cl[i]) for i in range(4)] self.image_loss_cr = [(self.params.alpha_image_loss * self. ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self .l1_reconstruction_loss_cr[i]) for i in range(4)] self.image_loss = tf.add_n(self.image_loss_left + self. image_loss_cl + self.image_loss_right + self.image_loss_cr) self.image_loss_L = tf.add_n(self.image_loss_left + self. 
image_loss_cl) self.image_loss_R = tf.add_n(self.image_loss_right + self. image_loss_cr) self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self. disp_lc_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self. disp_cl_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self. disp_rc_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self. disp_cr_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self. disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss) self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self. disp_cl_loss) self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self. disp_cr_loss) self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)] self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)] self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)] self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)] self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss) self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss) self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss) self.central_disparity_dif = [tf.reduce_mean(tf.abs(self. disp_cl[i] - self.disp_cr[i])) for i in range(4)] self.central_disparity_loss = tf.add_n(self.central_disparity_dif) self.total_loss = (self.image_loss + self.params. disp_gradient_loss_weight * self.disp_gradient_loss + self. params.lr_loss_weight * self.lr_loss + self. central_disparity_loss) self.total_loss_L = (self.image_loss_L + self.params. disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L) self.total_loss_R = (self.image_loss_R + self.params. disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R) def build_summaries(self): with tf.device('/cpu:0'): for i in range(4): tf.summary.scalar('ssim_loss_' + str(i), self. ssim_loss_left[i] + self.ssim_loss_cl[i] + self. ssim_loss_right[i] + self.ssim_loss_cr[i], collections= self.model_collection) tf.summary.scalar('l1_loss_' + str(i), self. l1_reconstruction_loss_left[i] + self. l1_reconstruction_loss_cl[i] + self. l1_reconstruction_loss_right[i] + self. l1_reconstruction_loss_cr[i], collections=self. model_collection) tf.summary.scalar('image_loss_' + str(i), self. image_loss_left[i] + self.image_loss_cl[i] + self. image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection) tf.summary.scalar('disp_gradient_loss_' + str(i), self. disp_lc_loss[i] + self.disp_cl_loss[i] + self. disp_rc_loss[i] + self.disp_cr_loss[i], collections= self.model_collection) tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self. lr_cr_loss[i], collections=self.model_collection) tf.summary.scalar('total_loss_L', self.total_loss_L, collections=self.model_collection) tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection) tf.summary.scalar('central_disparity_loss', self. 
central_disparity_loss, collections=self.model_collection) tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection) tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection) tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i ], max_outputs=4, collections=self.model_collection) tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection) tf.summary.image('left_pyramid_' + str(i), self. left_pyramid[i], max_outputs=4, collections=self. model_collection) tf.summary.image('central_pyramid_' + str(i), self. central_pyramid[i], max_outputs=4, collections=self. model_collection) tf.summary.image('right_pyramid_' + str(i), self. right_pyramid[i], max_outputs=4, collections=self. model_collection) tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection) tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection) tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection) tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection) if self.params.full_summary: tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection) tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection) if self.params.full_summary: tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection) tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection) tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection) <|reserved_special_token_1|> <|reserved_special_token_0|> class trinet(object): def __init__(self, params, mode, left, central, right, reuse_variables= None, model_index=0, net='vgg'): self.params = params self.mode = mode self.model_collection = ['model_0'] self.left = left self.right = right self.central = central self.reuse_variables = reuse_variables self.model_index = model_index self.build_model(net) self.build_outputs() if self.mode == 'test': return self.build_losses() self.build_summaries() def gradient_x(self, img): gx = img[:, :, :-1, :] - img[:, :, 1:, :] return gx def gradient_y(self, img): gy = img[:, :-1, :, :] - img[:, 1:, :, :] return gy def scale_pyramid(self, img, num_scales): scaled_imgs = [img] s = tf.shape(img) h = s[1] w = s[2] for i in range(num_scales - 1): ratio = 2 ** (i + 1) nh = h // ratio nw = w // ratio scaled_imgs.append(tf.image.resize_area(img, [nh, nw])) return scaled_imgs def generate_image_left(self, img, disp): return bilinear_sampler_1d_h(img, -disp) def generate_image_right(self, img, disp): return bilinear_sampler_1d_h(img, disp) def SSIM(self, x, y): C1 = 0.01 ** 2 C2 = 0.03 ** 2 mu_x = slim.avg_pool2d(x, 3, 1, 'VALID') mu_y = slim.avg_pool2d(y, 3, 1, 'VALID') sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2 sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2 sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2) SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2) SSIM = SSIM_n / SSIM_d return tf.clip_by_value((1 - SSIM) / 2, 0, 1) def get_disparity_smoothness(self, disp, pyramid): disp_gradients_x = [self.gradient_x(d) for d in disp] disp_gradients_y = [self.gradient_y(d) 
for d in disp] image_gradients_x = [self.gradient_x(img) for img in pyramid] image_gradients_y = [self.gradient_y(img) for img in pyramid] weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x] weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y] smoothness_x = [(disp_gradients_x[i] * weights_x[i]) for i in range(4)] smoothness_y = [(disp_gradients_y[i] * weights_y[i]) for i in range(4)] return smoothness_x + smoothness_y def build_model(self, net): with tf.variable_scope('model', reuse=self.reuse_variables) as scope: self.left_pyramid = self.scale_pyramid(self.left, 4) self.right_pyramid = self.scale_pyramid(self.right, 4) self.central_pyramid = self.scale_pyramid(self.central, 4) with tf.variable_scope('shared-encoder'): features_cr = self.build_encoder(self.central, model_name=net) features_cl = features_cr with tf.variable_scope('encoder-C2R'): self.disp_c2r = self.build_decoder(features_cr, model_name=net) with tf.variable_scope('encoder-C2L'): self.disp_c2l = self.build_decoder(features_cl, model_name=net) def build_encoder(self, model_input, model_name='vgg'): with tf.variable_scope('encoder'): if model_name == 'vgg': conv1 = conv_block(model_input, 32, 7) conv2 = conv_block(conv1, 64, 5) conv3 = conv_block(conv2, 128, 3) conv4 = conv_block(conv3, 256, 3) conv5 = conv_block(conv4, 512, 3) conv6 = conv_block(conv5, 512, 3) conv7 = conv_block(conv6, 512, 3) return conv7, conv1, conv2, conv3, conv4, conv5, conv6 elif model_name == 'resnet50': conv1 = conv(model_input, 64, 7, 2) pool1 = maxpool(conv1, 3) conv2 = resblock(pool1, 64, 3) conv3 = resblock(conv2, 128, 4) conv4 = resblock(conv3, 256, 6) conv5 = resblock(conv4, 512, 3) return conv5, conv1, pool1, conv2, conv3, conv4 def build_decoder(self, skip, model_name='vgg'): with tf.variable_scope('decoder'): if model_name == 'vgg': upconv7 = upconv(skip[0], 512, 3, 2) concat7 = tf.concat([upconv7, skip[6]], 3) iconv7 = conv(concat7, 512, 3, 1) upconv6 = upconv(iconv7, 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 = tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 1) disp1 = get_disp(iconv1) elif model_name == 'resnet50': upconv6 = upconv(skip[0], 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 = tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = 
tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 1) disp1 = get_disp(iconv1) return disp1, disp2, disp3, disp4 def build_outputs(self): with tf.variable_scope('disparities'): self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self. disp_c2l] self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self. disp_c2l] self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self. disp_c2r] self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self. disp_c2r] with tf.variable_scope('images'): self.left_est = [self.generate_image_left(self.central_pyramid[ i], self.disp_lc[i]) for i in range(4)] self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)] self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)] self.right_est = [self.generate_image_right(self. central_pyramid[i], self.disp_rc[i]) for i in range(4)] with tf.variable_scope('left-right'): self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)] self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)] self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)] self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)] with tf.variable_scope('smoothness'): self.disp_lc_smoothness = self.get_disparity_smoothness(self. disp_lc, self.left_pyramid) self.disp_cl_smoothness = self.get_disparity_smoothness(self. disp_cl, self.central_pyramid) self.disp_cr_smoothness = self.get_disparity_smoothness(self. disp_cr, self.central_pyramid) self.disp_rc_smoothness = self.get_disparity_smoothness(self. disp_rc, self.right_pyramid) def build_losses(self): with tf.variable_scope('losses', reuse=self.reuse_variables): self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left] self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[ i]) for i in range(4)] self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right] self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl] self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)] self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr] self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid [i]) for i in range(4)] self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left] self.ssim_right = [self.SSIM(self.right_est[i], self. right_pyramid[i]) for i in range(4)] self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right] self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[ i]) for i in range(4)] self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl] self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[ i]) for i in range(4)] self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr] self.image_loss_right = [(self.params.alpha_image_loss * self. ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i]) for i in range(4)] self.image_loss_left = [(self.params.alpha_image_loss * self. 
ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i]) for i in range(4)] self.image_loss_cl = [(self.params.alpha_image_loss * self. ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self .l1_reconstruction_loss_cl[i]) for i in range(4)] self.image_loss_cr = [(self.params.alpha_image_loss * self. ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self .l1_reconstruction_loss_cr[i]) for i in range(4)] self.image_loss = tf.add_n(self.image_loss_left + self. image_loss_cl + self.image_loss_right + self.image_loss_cr) self.image_loss_L = tf.add_n(self.image_loss_left + self. image_loss_cl) self.image_loss_R = tf.add_n(self.image_loss_right + self. image_loss_cr) self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self. disp_lc_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self. disp_cl_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self. disp_rc_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self. disp_cr_smoothness[i])) / 2 ** i) for i in range(4)] self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self. disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss) self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self. disp_cl_loss) self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self. disp_cr_loss) self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)] self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)] self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)] self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)] self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss) self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss) self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss) self.central_disparity_dif = [tf.reduce_mean(tf.abs(self. disp_cl[i] - self.disp_cr[i])) for i in range(4)] self.central_disparity_loss = tf.add_n(self.central_disparity_dif) self.total_loss = (self.image_loss + self.params. disp_gradient_loss_weight * self.disp_gradient_loss + self. params.lr_loss_weight * self.lr_loss + self. central_disparity_loss) self.total_loss_L = (self.image_loss_L + self.params. disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L) self.total_loss_R = (self.image_loss_R + self.params. disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R) def build_summaries(self): with tf.device('/cpu:0'): for i in range(4): tf.summary.scalar('ssim_loss_' + str(i), self. ssim_loss_left[i] + self.ssim_loss_cl[i] + self. ssim_loss_right[i] + self.ssim_loss_cr[i], collections= self.model_collection) tf.summary.scalar('l1_loss_' + str(i), self. l1_reconstruction_loss_left[i] + self. l1_reconstruction_loss_cl[i] + self. l1_reconstruction_loss_right[i] + self. l1_reconstruction_loss_cr[i], collections=self. model_collection) tf.summary.scalar('image_loss_' + str(i), self. image_loss_left[i] + self.image_loss_cl[i] + self. image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection) tf.summary.scalar('disp_gradient_loss_' + str(i), self. disp_lc_loss[i] + self.disp_cl_loss[i] + self. 
disp_rc_loss[i] + self.disp_cr_loss[i], collections= self.model_collection) tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self. lr_cr_loss[i], collections=self.model_collection) tf.summary.scalar('total_loss_L', self.total_loss_L, collections=self.model_collection) tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection) tf.summary.scalar('central_disparity_loss', self. central_disparity_loss, collections=self.model_collection) tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection) tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection) tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i ], max_outputs=4, collections=self.model_collection) tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection) tf.summary.image('left_pyramid_' + str(i), self. left_pyramid[i], max_outputs=4, collections=self. model_collection) tf.summary.image('central_pyramid_' + str(i), self. central_pyramid[i], max_outputs=4, collections=self. model_collection) tf.summary.image('right_pyramid_' + str(i), self. right_pyramid[i], max_outputs=4, collections=self. model_collection) tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection) tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection) tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection) tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection) if self.params.full_summary: tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection) tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection) if self.params.full_summary: tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection) tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection) tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection) <|reserved_special_token_1|> from layers import * from utils import * from collections import namedtuple trinet_parameters = namedtuple('parameters', 'encoder, height, width, batch_size, num_threads, num_epochs, alpha_image_loss, disp_gradient_loss_weight, lr_loss_weight, full_summary' ) class trinet(object): def __init__(self, params, mode, left, central, right, reuse_variables= None, model_index=0, net='vgg'): self.params = params self.mode = mode self.model_collection = ['model_0'] self.left = left self.right = right self.central = central self.reuse_variables = reuse_variables self.model_index = model_index self.build_model(net) self.build_outputs() if self.mode == 'test': return self.build_losses() self.build_summaries() def gradient_x(self, img): gx = img[:, :, :-1, :] - img[:, :, 1:, :] return gx def gradient_y(self, img): gy = img[:, :-1, :, :] - img[:, 1:, :, :] return gy def scale_pyramid(self, img, num_scales): scaled_imgs = [img] s = tf.shape(img) h = s[1] w = s[2] for i in range(num_scales - 1): ratio = 2 ** (i + 1) nh = h // ratio nw = w // ratio scaled_imgs.append(tf.image.resize_area(img, [nh, nw])) return scaled_imgs def generate_image_left(self, img, disp): return bilinear_sampler_1d_h(img, -disp) def generate_image_right(self, img, disp): 
return bilinear_sampler_1d_h(img, disp) def SSIM(self, x, y): C1 = 0.01 ** 2 C2 = 0.03 ** 2 mu_x = slim.avg_pool2d(x, 3, 1, 'VALID') mu_y = slim.avg_pool2d(y, 3, 1, 'VALID') sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2 sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2 sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2) SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2) SSIM = SSIM_n / SSIM_d return tf.clip_by_value((1 - SSIM) / 2, 0, 1) def get_disparity_smoothness(self, disp, pyramid): disp_gradients_x = [self.gradient_x(d) for d in disp] disp_gradients_y = [self.gradient_y(d) for d in disp] image_gradients_x = [self.gradient_x(img) for img in pyramid] image_gradients_y = [self.gradient_y(img) for img in pyramid] weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x] weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y] smoothness_x = [(disp_gradients_x[i] * weights_x[i]) for i in range(4)] smoothness_y = [(disp_gradients_y[i] * weights_y[i]) for i in range(4)] return smoothness_x + smoothness_y def build_model(self, net): with tf.variable_scope('model', reuse=self.reuse_variables) as scope: self.left_pyramid = self.scale_pyramid(self.left, 4) self.right_pyramid = self.scale_pyramid(self.right, 4) self.central_pyramid = self.scale_pyramid(self.central, 4) with tf.variable_scope('shared-encoder'): features_cr = self.build_encoder(self.central, model_name=net) features_cl = features_cr with tf.variable_scope('encoder-C2R'): self.disp_c2r = self.build_decoder(features_cr, model_name=net) with tf.variable_scope('encoder-C2L'): self.disp_c2l = self.build_decoder(features_cl, model_name=net) def build_encoder(self, model_input, model_name='vgg'): with tf.variable_scope('encoder'): if model_name == 'vgg': conv1 = conv_block(model_input, 32, 7) conv2 = conv_block(conv1, 64, 5) conv3 = conv_block(conv2, 128, 3) conv4 = conv_block(conv3, 256, 3) conv5 = conv_block(conv4, 512, 3) conv6 = conv_block(conv5, 512, 3) conv7 = conv_block(conv6, 512, 3) return conv7, conv1, conv2, conv3, conv4, conv5, conv6 elif model_name == 'resnet50': conv1 = conv(model_input, 64, 7, 2) pool1 = maxpool(conv1, 3) conv2 = resblock(pool1, 64, 3) conv3 = resblock(conv2, 128, 4) conv4 = resblock(conv3, 256, 6) conv5 = resblock(conv4, 512, 3) return conv5, conv1, pool1, conv2, conv3, conv4 def build_decoder(self, skip, model_name='vgg'): with tf.variable_scope('decoder'): if model_name == 'vgg': upconv7 = upconv(skip[0], 512, 3, 2) concat7 = tf.concat([upconv7, skip[6]], 3) iconv7 = conv(concat7, 512, 3, 1) upconv6 = upconv(iconv7, 512, 3, 2) concat6 = tf.concat([upconv6, skip[5]], 3) iconv6 = conv(concat6, 512, 3, 1) upconv5 = upconv(iconv6, 256, 3, 2) concat5 = tf.concat([upconv5, skip[4]], 3) iconv5 = conv(concat5, 256, 3, 1) upconv4 = upconv(iconv5, 128, 3, 2) concat4 = tf.concat([upconv4, skip[3]], 3) iconv4 = conv(concat4, 128, 3, 1) disp4 = get_disp(iconv4) udisp4 = upsample_nn(disp4, 2) upconv3 = upconv(iconv4, 64, 3, 2) concat3 = tf.concat([upconv3, skip[2], udisp4], 3) iconv3 = conv(concat3, 64, 3, 1) disp3 = get_disp(iconv3) udisp3 = upsample_nn(disp3, 2) upconv2 = upconv(iconv3, 32, 3, 2) concat2 = tf.concat([upconv2, skip[1], udisp3], 3) iconv2 = conv(concat2, 32, 3, 1) disp2 = get_disp(iconv2) udisp2 = upsample_nn(disp2, 2) upconv1 = upconv(iconv2, 16, 3, 2) concat1 = tf.concat([upconv1, udisp2], 3) iconv1 = conv(concat1, 16, 3, 
1)
                disp1 = get_disp(iconv1)

            elif model_name == 'resnet50':
                upconv6 = upconv(skip[0], 512, 3, 2)
                concat6 = tf.concat([upconv6, skip[5]], 3)
                iconv6 = conv(concat6, 512, 3, 1)

                upconv5 = upconv(iconv6, 256, 3, 2)
                concat5 = tf.concat([upconv5, skip[4]], 3)
                iconv5 = conv(concat5, 256, 3, 1)

                upconv4 = upconv(iconv5, 128, 3, 2)
                concat4 = tf.concat([upconv4, skip[3]], 3)
                iconv4 = conv(concat4, 128, 3, 1)
                disp4 = get_disp(iconv4)
                udisp4 = upsample_nn(disp4, 2)

                upconv3 = upconv(iconv4, 64, 3, 2)
                concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
                iconv3 = conv(concat3, 64, 3, 1)
                disp3 = get_disp(iconv3)
                udisp3 = upsample_nn(disp3, 2)

                upconv2 = upconv(iconv3, 32, 3, 2)
                concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
                iconv2 = conv(concat2, 32, 3, 1)
                disp2 = get_disp(iconv2)
                udisp2 = upsample_nn(disp2, 2)

                upconv1 = upconv(iconv2, 16, 3, 2)
                concat1 = tf.concat([upconv1, udisp2], 3)
                iconv1 = conv(concat1, 16, 3, 1)
                disp1 = get_disp(iconv1)

            return disp1, disp2, disp3, disp4

    def build_outputs(self):
        with tf.variable_scope('disparities'):
            self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2l]
            self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2l]
            self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2r]
            self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2r]

        with tf.variable_scope('images'):
            self.left_est = [self.generate_image_left(self.central_pyramid[i], self.disp_lc[i]) for i in range(4)]
            self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)]
            self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)]
            self.right_est = [self.generate_image_right(self.central_pyramid[i], self.disp_rc[i]) for i in range(4)]

        with tf.variable_scope('left-right'):
            self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)]
            self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)]
            self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)]
            self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)]

        with tf.variable_scope('smoothness'):
            self.disp_lc_smoothness = self.get_disparity_smoothness(self.disp_lc, self.left_pyramid)
            self.disp_cl_smoothness = self.get_disparity_smoothness(self.disp_cl, self.central_pyramid)
            self.disp_cr_smoothness = self.get_disparity_smoothness(self.disp_cr, self.central_pyramid)
            self.disp_rc_smoothness = self.get_disparity_smoothness(self.disp_rc, self.right_pyramid)

    def build_losses(self):
        with tf.variable_scope('losses', reuse=self.reuse_variables):
            self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left]
            self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right]
            self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl]
            self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr]

            self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid[i]) for i in range(4)]
            self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]
            self.ssim_right = [self.SSIM(self.right_est[i], self.right_pyramid[i]) for i in range(4)]
            self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]
            self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[i]) for i in range(4)]
            self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]
            self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[i]) for i in range(4)]
            self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]

            self.image_loss_right = [(self.params.alpha_image_loss * self.ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i]) for i in range(4)]
            self.image_loss_left = [(self.params.alpha_image_loss * self.ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i]) for i in range(4)]
            self.image_loss_cl = [(self.params.alpha_image_loss * self.ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cl[i]) for i in range(4)]
            self.image_loss_cr = [(self.params.alpha_image_loss * self.ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cr[i]) for i in range(4)]
            self.image_loss = tf.add_n(self.image_loss_left + self.image_loss_cl + self.image_loss_right + self.image_loss_cr)
            self.image_loss_L = tf.add_n(self.image_loss_left + self.image_loss_cl)
            self.image_loss_R = tf.add_n(self.image_loss_right + self.image_loss_cr)

            self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self.disp_lc_smoothness[i])) / 2 ** i) for i in range(4)]
            self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self.disp_cl_smoothness[i])) / 2 ** i) for i in range(4)]
            self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self.disp_rc_smoothness[i])) / 2 ** i) for i in range(4)]
            self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self.disp_cr_smoothness[i])) / 2 ** i) for i in range(4)]
            self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)
            self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.disp_cl_loss)
            self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.disp_cr_loss)

            self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)]
            self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)]
            self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)]
            self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)]
            self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss)
            self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)
            self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)

            self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.disp_cl[i] - self.disp_cr[i])) for i in range(4)]
            self.central_disparity_loss = tf.add_n(self.central_disparity_dif)

            self.total_loss = (self.image_loss + self.params.disp_gradient_loss_weight * self.disp_gradient_loss + self.params.lr_loss_weight * self.lr_loss + self.central_disparity_loss)
            self.total_loss_L = (self.image_loss_L + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L)
            self.total_loss_R = (self.image_loss_R + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R)

    def build_summaries(self):
        with tf.device('/cpu:0'):
            for i in range(4):
                tf.summary.scalar('ssim_loss_' + str(i), self.ssim_loss_left[i] + self.ssim_loss_cl[i] + self.ssim_loss_right[i] + self.ssim_loss_cr[i], collections=self.model_collection)
                tf.summary.scalar('l1_loss_' + str(i), self.l1_reconstruction_loss_left[i] + self.l1_reconstruction_loss_cl[i] + self.l1_reconstruction_loss_right[i] + self.l1_reconstruction_loss_cr[i], collections=self.model_collection)
                tf.summary.scalar('image_loss_' + str(i), self.image_loss_left[i] + self.image_loss_cl[i] + self.image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection)
                tf.summary.scalar('disp_gradient_loss_' + str(i), self.disp_lc_loss[i] + self.disp_cl_loss[i] + self.disp_rc_loss[i] + self.disp_cr_loss[i], collections=self.model_collection)
                tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.lr_cr_loss[i], collections=self.model_collection)
                tf.summary.scalar('total_loss_L', self.total_loss_L, collections=self.model_collection)
                tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection)
                tf.summary.scalar('central_disparity_loss', self.central_disparity_loss, collections=self.model_collection)
                tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('left_pyramid_' + str(i), self.left_pyramid[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('central_pyramid_' + str(i), self.central_pyramid[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('right_pyramid_' + str(i), self.right_pyramid[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)
                if self.params.full_summary:
                    tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection)
                    tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection)

            if self.params.full_summary:
                tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection)
                tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection)
                tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection)


#
# MIT License
#
# Copyright (c) 2018 Matteo Poggi [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from layers import *
from utils import *

from collections import namedtuple

trinet_parameters = namedtuple('parameters',
                               'encoder, '
                               'height, width, '
                               'batch_size, '
                               'num_threads, '
                               'num_epochs, '
                               'alpha_image_loss, '
                               'disp_gradient_loss_weight, '
                               'lr_loss_weight, '
                               'full_summary')


class trinet(object):

    def __init__(self, params, mode, left, central, right, reuse_variables=None, model_index=0, net='vgg'):
        self.params = params
        self.mode = mode
        self.model_collection = ['model_0']
        self.left = left
        self.right = right
        self.central = central
        self.reuse_variables = reuse_variables
        self.model_index = model_index

        self.build_model(net)
        self.build_outputs()

        if self.mode == 'test':
            return

        self.build_losses()
        self.build_summaries()

    def gradient_x(self, img):
        gx = img[:, :, :-1, :] - img[:, :, 1:, :]
        return gx

    def gradient_y(self, img):
        gy = img[:, :-1, :, :] - img[:, 1:, :, :]
        return gy

    def scale_pyramid(self, img, num_scales):
        scaled_imgs = [img]
        s = tf.shape(img)
        h = s[1]
        w = s[2]
        for i in range(num_scales - 1):
            ratio = 2 ** (i + 1)
            nh = h // ratio
            nw = w // ratio
            scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))
        return scaled_imgs

    def generate_image_left(self, img, disp):
        return bilinear_sampler_1d_h(img, -disp)

    def generate_image_right(self, img, disp):
        return bilinear_sampler_1d_h(img, disp)

    def SSIM(self, x, y):
        C1 = 0.01 ** 2
        C2 = 0.03 ** 2

        mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
        mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')

        sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
        sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
        sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y

        SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)

        SSIM = SSIM_n / SSIM_d

        return tf.clip_by_value((1 - SSIM) / 2, 0, 1)

    def get_disparity_smoothness(self, disp, pyramid):
        disp_gradients_x = [self.gradient_x(d) for d in disp]
        disp_gradients_y = [self.gradient_y(d) for d in disp]

        image_gradients_x = [self.gradient_x(img) for img in pyramid]
        image_gradients_y = [self.gradient_y(img) for img in pyramid]

        weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x]
        weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y]

        smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(4)]
        smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(4)]
        return smoothness_x + smoothness_y

    # Build model
    def build_model(self, net):
        with tf.variable_scope('model', reuse=self.reuse_variables) as scope:
            self.left_pyramid = self.scale_pyramid(self.left, 4)
            # if self.mode == 'train':
            self.right_pyramid = self.scale_pyramid(self.right, 4)
            self.central_pyramid = self.scale_pyramid(self.central, 4)

            with tf.variable_scope('shared-encoder'):
                features_cr = self.build_encoder(self.central, model_name=net)
                features_cl = features_cr

            with tf.variable_scope('encoder-C2R'):
                self.disp_c2r = self.build_decoder(features_cr, model_name=net)

            with tf.variable_scope('encoder-C2L'):
                self.disp_c2l = self.build_decoder(features_cl, model_name=net)

    # Build shared encoder
    def build_encoder(self, model_input, model_name='vgg'):
        with tf.variable_scope('encoder'):
            if model_name == 'vgg':
                conv1 = conv_block(model_input, 32, 7)  # H/2
                conv2 = conv_block(conv1, 64, 5)        # H/4
                conv3 = conv_block(conv2, 128, 3)       # H/8
                conv4 = conv_block(conv3, 256, 3)       # H/16
                conv5 = conv_block(conv4, 512, 3)       # H/32
                conv6 = conv_block(conv5, 512, 3)       # H/64
                conv7 = conv_block(conv6, 512, 3)       # H/128
                return conv7, conv1, conv2, conv3, conv4, conv5, conv6
            elif model_name == 'resnet50':
                conv1 = conv(model_input, 64, 7, 2)  # H/2  -  64D
                pool1 = maxpool(conv1, 3)            # H/4  -  64D
                conv2 = resblock(pool1, 64, 3)       # H/8  -  256D
                conv3 = resblock(conv2, 128, 4)      # H/16 -  512D
                conv4 = resblock(conv3, 256, 6)      # H/32 - 1024D
                conv5 = resblock(conv4, 512, 3)      # H/64 - 2048D
                return conv5, conv1, pool1, conv2, conv3, conv4

    def build_decoder(self, skip, model_name='vgg'):
        with tf.variable_scope('decoder'):
            if model_name == 'vgg':
                upconv7 = upconv(skip[0], 512, 3, 2)  # H/64
                concat7 = tf.concat([upconv7, skip[6]], 3)
                iconv7 = conv(concat7, 512, 3, 1)

                upconv6 = upconv(iconv7, 512, 3, 2)  # H/32
                concat6 = tf.concat([upconv6, skip[5]], 3)
                iconv6 = conv(concat6, 512, 3, 1)

                upconv5 = upconv(iconv6, 256, 3, 2)  # H/16
                concat5 = tf.concat([upconv5, skip[4]], 3)
                iconv5 = conv(concat5, 256, 3, 1)

                upconv4 = upconv(iconv5, 128, 3, 2)  # H/8
                concat4 = tf.concat([upconv4, skip[3]], 3)
                iconv4 = conv(concat4, 128, 3, 1)
                disp4 = get_disp(iconv4)
                udisp4 = upsample_nn(disp4, 2)

                upconv3 = upconv(iconv4, 64, 3, 2)  # H/4
                concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
                iconv3 = conv(concat3, 64, 3, 1)
                disp3 = get_disp(iconv3)
                udisp3 = upsample_nn(disp3, 2)

                upconv2 = upconv(iconv3, 32, 3, 2)  # H/2
                concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
                iconv2 = conv(concat2, 32, 3, 1)
                disp2 = get_disp(iconv2)
                udisp2 = upsample_nn(disp2, 2)

                upconv1 = upconv(iconv2, 16, 3, 2)  # H
                concat1 = tf.concat([upconv1, udisp2], 3)
                iconv1 = conv(concat1, 16, 3, 1)
                disp1 = get_disp(iconv1)

            elif model_name == 'resnet50':
                upconv6 = upconv(skip[0], 512, 3, 2)  # H/32
                concat6 = tf.concat([upconv6, skip[5]], 3)
                iconv6 = conv(concat6, 512, 3, 1)

                upconv5 = upconv(iconv6, 256, 3, 2)  # H/16
                concat5 = tf.concat([upconv5, skip[4]], 3)
                iconv5 = conv(concat5, 256, 3, 1)

                upconv4 = upconv(iconv5, 128, 3, 2)  # H/8
                concat4 = tf.concat([upconv4, skip[3]], 3)
                iconv4 = conv(concat4, 128, 3, 1)
                disp4 = get_disp(iconv4)
                udisp4 = upsample_nn(disp4, 2)

                upconv3 = upconv(iconv4, 64, 3, 2)  # H/4
                concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
                iconv3 = conv(concat3, 64, 3, 1)
                disp3 = get_disp(iconv3)
                udisp3 = upsample_nn(disp3, 2)

                upconv2 = upconv(iconv3, 32, 3, 2)  # H/2
                concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
                iconv2 = conv(concat2, 32, 3, 1)
                disp2 = get_disp(iconv2)
                udisp2 = upsample_nn(disp2, 2)

                upconv1 = upconv(iconv2, 16, 3, 2)  # H
                concat1 = tf.concat([upconv1, udisp2], 3)
                iconv1 = conv(concat1, 16, 3, 1)
                disp1 = get_disp(iconv1)

            return disp1, disp2, disp3, disp4

    def build_outputs(self):
        #self.disparity_cr = self.disp_cr[0][0,:,:,0]
        #self.disparity_cl = self.disp_cl[0][0,:,:,0]
        #self.warp_left = generate_image_left(self.placeholders['im0'], self.disparity_cl)[0]
        #self.warp_right = generate_image_right(self.placeholders['im0'], self.disparity_cr)[0]

        # STORE DISPARITIES
        with tf.variable_scope('disparities'):
            self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2l]
            self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2l]
            self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2r]
            self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2r]

        # GENERATE IMAGES
        with tf.variable_scope('images'):
            self.left_est = [self.generate_image_left(self.central_pyramid[i], self.disp_lc[i]) for i in range(4)]
            self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)]
            self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)]
            self.right_est = [self.generate_image_right(self.central_pyramid[i], self.disp_rc[i]) for i in range(4)]

        # LR CONSISTENCY
        with tf.variable_scope('left-right'):
            self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)]
            self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)]
            self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)]
            self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)]

        # DISPARITY SMOOTHNESS
        with tf.variable_scope('smoothness'):
            self.disp_lc_smoothness = self.get_disparity_smoothness(self.disp_lc, self.left_pyramid)
            self.disp_cl_smoothness = self.get_disparity_smoothness(self.disp_cl, self.central_pyramid)
            self.disp_cr_smoothness = self.get_disparity_smoothness(self.disp_cr, self.central_pyramid)
            self.disp_rc_smoothness = self.get_disparity_smoothness(self.disp_rc, self.right_pyramid)

    def build_losses(self):
        with tf.variable_scope('losses', reuse=self.reuse_variables):
            # IMAGE RECONSTRUCTION
            # L1
            self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left]
            self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right]
            self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl]
            self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr]

            # SSIM
            self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid[i]) for i in range(4)]
            self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]
            self.ssim_right = [self.SSIM(self.right_est[i], self.right_pyramid[i]) for i in range(4)]
            self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]
            self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[i]) for i in range(4)]
            self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]
            self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[i]) for i in range(4)]
            self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]

            # WEIGHTED SUM
            self.image_loss_right = [self.params.alpha_image_loss * self.ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i] for i in range(4)]
            self.image_loss_left = [self.params.alpha_image_loss * self.ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i] for i in range(4)]
            self.image_loss_cl = [self.params.alpha_image_loss * self.ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cl[i] for i in range(4)]
            self.image_loss_cr = [self.params.alpha_image_loss * self.ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cr[i] for i in range(4)]
            self.image_loss = tf.add_n(self.image_loss_left + self.image_loss_cl + self.image_loss_right + self.image_loss_cr)
            self.image_loss_L = tf.add_n(self.image_loss_left + self.image_loss_cl)
            self.image_loss_R = tf.add_n(self.image_loss_right + self.image_loss_cr)

            # DISPARITY SMOOTHNESS
            self.disp_lc_loss = [tf.reduce_mean(tf.abs(self.disp_lc_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_cl_loss = [tf.reduce_mean(tf.abs(self.disp_cl_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_rc_loss = [tf.reduce_mean(tf.abs(self.disp_rc_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_cr_loss = [tf.reduce_mean(tf.abs(self.disp_cr_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)
            self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.disp_cl_loss)
            self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.disp_cr_loss)

            # LR CONSISTENCY
            self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)]
            self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)]
            self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)]
            self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)]
            self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss)
            self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)
            self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)

            # CENTRAL DISPARITY CONSISTENCY
            self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.disp_cl[i] - self.disp_cr[i])) for i in range(4)]
            self.central_disparity_loss = tf.add_n(self.central_disparity_dif)

            # TOTAL LOSS
            self.total_loss = self.image_loss + self.params.disp_gradient_loss_weight * self.disp_gradient_loss + self.params.lr_loss_weight * self.lr_loss + self.central_disparity_loss
            self.total_loss_L = self.image_loss_L + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L
            self.total_loss_R = self.image_loss_R + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R

    def build_summaries(self):
        # SUMMARIES
        with tf.device('/cpu:0'):
            for i in range(4):
                tf.summary.scalar('ssim_loss_' + str(i), self.ssim_loss_left[i] + self.ssim_loss_cl[i] + self.ssim_loss_right[i] + self.ssim_loss_cr[i], collections=self.model_collection)
                tf.summary.scalar('l1_loss_' + str(i), self.l1_reconstruction_loss_left[i] + self.l1_reconstruction_loss_cl[i] + self.l1_reconstruction_loss_right[i] + self.l1_reconstruction_loss_cr[i], collections=self.model_collection)
                tf.summary.scalar('image_loss_' + str(i), self.image_loss_left[i] + self.image_loss_cl[i] + self.image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection)
                tf.summary.scalar('disp_gradient_loss_' + str(i), self.disp_lc_loss[i] + self.disp_cl_loss[i] + self.disp_rc_loss[i] + self.disp_cr_loss[i], collections=self.model_collection)
                tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.lr_cr_loss[i], collections=self.model_collection)
                tf.summary.scalar('total_loss_L', self.total_loss_L, collections=self.model_collection)
                tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection)
                tf.summary.scalar('central_disparity_loss', self.central_disparity_loss, collections=self.model_collection)
                tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('left_pyramid_' + str(i), self.left_pyramid[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('central_pyramid_' + str(i), self.central_pyramid[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('right_pyramid_' + str(i), self.right_pyramid[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
                tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)

                if self.params.full_summary:
                    #tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('ssim_left_' + str(i), self.ssim_left[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('ssim_right_' + str(i), self.ssim_right[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('ssim_cl_' + str(i), self.ssim_cl[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('ssim_cr_' + str(i), self.ssim_cr[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('l1_left_' + str(i), self.l1_left[i], max_outputs=4, collections=self.model_collection)
                    tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection)
                    #tf.summary.image('l1_cl_' + str(i), self.l1_cl[i], max_outputs=4, collections=self.model_collection)
                    tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection)

            if self.params.full_summary:
                tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection)
                tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection)
                tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection)
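

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original model code): a minimal, illustrative
# example of building the network in test mode. It assumes a TensorFlow 1.x
# graph-mode environment in which `layers`/`utils` provide the conv/upconv/
# bilinear-sampler helpers imported above. The image size and hyper-parameter
# values below are placeholders chosen so the encoder/decoder strides divide
# the input evenly; they are not the values used to train the original model.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import tensorflow as tf

    params = trinet_parameters(
        encoder='vgg',
        height=256,
        width=512,
        batch_size=1,
        num_threads=1,
        num_epochs=1,
        alpha_image_loss=0.85,
        disp_gradient_loss_weight=0.1,
        lr_loss_weight=1.0,
        full_summary=False)

    # Placeholders for the three views of the trinocular rig (left, central, right).
    left = tf.placeholder(tf.float32, [params.batch_size, params.height, params.width, 3])
    central = tf.placeholder(tf.float32, [params.batch_size, params.height, params.width, 3])
    right = tf.placeholder(tf.float32, [params.batch_size, params.height, params.width, 3])

    # In 'test' mode only the disparity outputs are built (no losses or summaries).
    model = trinet(params, 'test', left, central, right, net=params.encoder)
    print(model.disp_cl[0], model.disp_cr[0])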
self.disp_lc_smoothness = self.get_disparity_smoothness(self.\n disp_lc, self.left_pyramid)\n self.disp_cl_smoothness = self.get_disparity_smoothness(self.\n disp_cl, self.central_pyramid)\n self.disp_cr_smoothness = self.get_disparity_smoothness(self.\n disp_cr, self.central_pyramid)\n self.disp_rc_smoothness = self.get_disparity_smoothness(self.\n disp_rc, self.right_pyramid)\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in\n self.l1_left]\n self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[\n i]) for i in range(4)]\n self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in\n self.l1_right]\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in\n self.l1_cl]\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in\n self.l1_cr]\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid\n [i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n self.ssim_right = [self.SSIM(self.right_est[i], self.\n right_pyramid[i]) for i in range(4)]\n self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n self.image_loss_right = [(self.params.alpha_image_loss * self.\n ssim_loss_right[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_right[i]) for i in range(4)]\n self.image_loss_left = [(self.params.alpha_image_loss * self.\n ssim_loss_left[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_left[i]) for i in range(4)]\n self.image_loss_cl = [(self.params.alpha_image_loss * self.\n ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cl[i]) for i in range(4)]\n self.image_loss_cr = [(self.params.alpha_image_loss * self.\n ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cr[i]) for i in range(4)]\n self.image_loss = tf.add_n(self.image_loss_left + self.\n image_loss_cl + self.image_loss_right + self.image_loss_cr)\n self.image_loss_L = tf.add_n(self.image_loss_left + self.\n image_loss_cl)\n self.image_loss_R = tf.add_n(self.image_loss_right + self.\n image_loss_cr)\n self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_lc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cl_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_rc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cr_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss)\n self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.\n disp_cr_loss)\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] -\n 
self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] -\n self.disp_cl[i])) for i in range(4)]\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] -\n self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] -\n self.disp_cr[i])) for i in range(4)]\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss +\n self.lr_rc_loss + self.lr_cr_loss)\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.\n disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n self.total_loss = (self.image_loss + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss + self.\n params.lr_loss_weight * self.lr_loss + self.\n central_disparity_loss)\n self.total_loss_L = (self.image_loss_L + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_L + \n self.params.lr_loss_weight * self.lr_loss_L)\n self.total_loss_R = (self.image_loss_R + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_R + \n self.params.lr_loss_weight * self.lr_loss_R)\n\n def build_summaries(self):\n with tf.device('/cpu:0'):\n for i in range(4):\n tf.summary.scalar('ssim_loss_' + str(i), self.\n ssim_loss_left[i] + self.ssim_loss_cl[i] + self.\n ssim_loss_right[i] + self.ssim_loss_cr[i], collections=\n self.model_collection)\n tf.summary.scalar('l1_loss_' + str(i), self.\n l1_reconstruction_loss_left[i] + self.\n l1_reconstruction_loss_cl[i] + self.\n l1_reconstruction_loss_right[i] + self.\n l1_reconstruction_loss_cr[i], collections=self.\n model_collection)\n tf.summary.scalar('image_loss_' + str(i), self.\n image_loss_left[i] + self.image_loss_cl[i] + self.\n image_loss_right[i] + self.image_loss_cr[i],\n collections=self.model_collection)\n tf.summary.scalar('disp_gradient_loss_' + str(i), self.\n disp_lc_loss[i] + self.disp_cl_loss[i] + self.\n disp_rc_loss[i] + self.disp_cr_loss[i], collections=\n self.model_collection)\n tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] +\n self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.\n lr_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('total_loss_L', self.total_loss_L,\n collections=self.model_collection)\n tf.summary.scalar('total_loss_R', self.total_loss_R,\n collections=self.model_collection)\n tf.summary.scalar('central_disparity_loss', self.\n central_disparity_loss, collections=self.model_collection)\n tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i\n ], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_pyramid_' + str(i), self.\n left_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('central_pyramid_' + str(i), self.\n central_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('right_pyramid_' + str(i), self.\n right_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('left_est_' + str(i), self.left_est[i],\n max_outputs=4, 
collections=self.model_collection)\n tf.summary.image('cr_est_' + str(i), self.cr_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cl_est_' + str(i), self.cl_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_est_' + str(i), self.right_est[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('l1_right_' + str(i), self.l1_right[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_cr_' + str(i), self.l1_cr[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('left', self.left, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('right', self.right, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('central', self.central, max_outputs=4,\n collections=self.model_collection)\n", "step-5": "#\n# MIT License\n#\n# Copyright (c) 2018 Matteo Poggi [email protected]\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom layers import *\nfrom utils import *\nfrom collections import namedtuple\n\ntrinet_parameters = namedtuple('parameters',\n 'encoder, '\n 'height, width, '\n 'batch_size, '\n 'num_threads, '\n 'num_epochs, '\n 'alpha_image_loss, '\n 'disp_gradient_loss_weight, '\n 'lr_loss_weight, '\n 'full_summary')\n\nclass trinet(object):\n\n def __init__(self,params, mode, left, central, right, reuse_variables=None, model_index=0, net='vgg'):\n self.params = params\n self.mode = mode\n self.model_collection = ['model_0']\n self.left = left\n self.right = right\n self.central = central\n self.reuse_variables = reuse_variables\n self.model_index = model_index\n\n self.build_model(net)\n self.build_outputs()\n if self.mode == 'test':\n return\n\n self.build_losses()\n self.build_summaries()\n\n def gradient_x(self, img):\n gx = img[:,:,:-1,:] - img[:,:,1:,:]\n return gx\n\n def gradient_y(self, img):\n gy = img[:,:-1,:,:] - img[:,1:,:,:]\n return gy\n\n def scale_pyramid(self, img, num_scales):\n scaled_imgs = [img]\n s = tf.shape(img)\n h = s[1]\n w = s[2]\n for i in range(num_scales - 1):\n ratio = 2 ** (i + 1)\n nh = h // ratio\n nw = w // ratio\n scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))\n return scaled_imgs\n\n def generate_image_left(self, img, disp):\n return bilinear_sampler_1d_h(img, -disp)\n\n def generate_image_right(self, img, disp):\n return bilinear_sampler_1d_h(img, disp)\n\n def SSIM(self, x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n\n mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')\n mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')\n\n sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y\n\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n\n SSIM = SSIM_n / SSIM_d\n\n return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n\n\n def get_disparity_smoothness(self, disp, pyramid):\n disp_gradients_x = [self.gradient_x(d) for d in disp]\n disp_gradients_y = [self.gradient_y(d) for d in disp]\n\n image_gradients_x = [self.gradient_x(img) for img in pyramid]\n image_gradients_y = [self.gradient_y(img) for img in pyramid]\n\n weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x]\n weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y]\n\n smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(4)]\n smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(4)]\n return smoothness_x + smoothness_y\n\n # Build model\n def build_model(self,net): \n with tf.variable_scope('model', reuse=self.reuse_variables) as scope:\n self.left_pyramid = self.scale_pyramid(self.left, 4)\n # if self.mode == 'train':\n self.right_pyramid = self.scale_pyramid(self.right, 4)\n self.central_pyramid = self.scale_pyramid(self.central, 4)\n\n with tf.variable_scope('shared-encoder'):\n features_cr = self.build_encoder(self.central,model_name=net)\n features_cl = features_cr\n with tf.variable_scope('encoder-C2R'):\n self.disp_c2r = self.build_decoder(features_cr,model_name=net)\n with tf.variable_scope('encoder-C2L'):\n self.disp_c2l = 
self.build_decoder(features_cl,model_name=net)\n \n # Build shared encoder\n def build_encoder(self, model_input, model_name='vgg'):\n\n with tf.variable_scope('encoder'):\n if model_name == 'vgg':\n conv1 = conv_block(model_input, 32, 7) # H/2\n conv2 = conv_block(conv1, 64, 5) # H/4\n conv3 = conv_block(conv2, 128, 3) # H/8\n conv4 = conv_block(conv3, 256, 3) # H/16\n conv5 = conv_block(conv4, 512, 3) # H/32\n conv6 = conv_block(conv5, 512, 3) # H/64\n conv7 = conv_block(conv6, 512, 3) # H/128 \n return conv7, conv1, conv2, conv3, conv4, conv5, conv6\n\n elif model_name == 'resnet50':\n conv1 = conv(model_input, 64, 7, 2) # H/2 - 64D\n pool1 = maxpool(conv1, 3) # H/4 - 64D\n conv2 = resblock(pool1, 64, 3) # H/8 - 256D\n conv3 = resblock(conv2, 128, 4) # H/16 - 512D\n conv4 = resblock(conv3, 256, 6) # H/32 - 1024D\n conv5 = resblock(conv4, 512, 3) # H/64 - 2048D\n return conv5, conv1, pool1, conv2, conv3, conv4 \n\n def build_decoder(self, skip, model_name='vgg'):\n\n with tf.variable_scope('decoder'):\n if model_name == 'vgg': \n upconv7 = upconv(skip[0], 512, 3, 2) #H/64\n concat7 = tf.concat([upconv7, skip[6]], 3)\n iconv7 = conv(concat7, 512, 3, 1)\n\n upconv6 = upconv(iconv7, 512, 3, 2) #H/32\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n\n upconv5 = upconv(iconv6, 256, 3, 2) #H/16\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n\n upconv4 = upconv(iconv5, 128, 3, 2) #H/8\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n\n upconv3 = upconv(iconv4, 64, 3, 2) #H/4\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n\n upconv2 = upconv(iconv3, 32, 3, 2) #H/2\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n\n upconv1 = upconv(iconv2, 16, 3, 2) #H\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n\n elif model_name == 'resnet50': \n upconv6 = upconv(skip[0], 512, 3, 2) #H/32\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n\n upconv5 = upconv(iconv6, 256, 3, 2) #H/16\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n\n upconv4 = upconv(iconv5, 128, 3, 2) #H/8\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n\n upconv3 = upconv(iconv4, 64, 3, 2) #H/4\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n\n upconv2 = upconv(iconv3, 32, 3, 2) #H/2\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n\n upconv1 = upconv(iconv2, 16, 3, 2) #H\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n\n return disp1, disp2, disp3, disp4 \n def build_outputs(self):\n #self.disparity_cr = self.disp_cr[0][0,:,:,0]\n #self.disparity_cl = self.disp_cl[0][0,:,:,0]\n #self.warp_left = generate_image_left(self.placeholders['im0'], self.disparity_cl)[0]\n #self.warp_right = generate_image_right(self.placeholders['im0'], self.disparity_cr)[0]\n\n # STORE DISPARITIES\n with 
tf.variable_scope('disparities'):\n\n self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2l]\n self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2l]\n\n self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2r]\n self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2r]\n\n # GENERATE IMAGES\n with tf.variable_scope('images'):\n self.left_est = [self.generate_image_left(self.central_pyramid[i], self.disp_lc[i]) for i in range(4)]\n self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)]\n\n self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)]\n self.right_est = [self.generate_image_right(self.central_pyramid[i], self.disp_rc[i]) for i in range(4)]\n\n # LR CONSISTENCY\n with tf.variable_scope('left-right'):\n self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)]\n self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)]\n\n self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)]\n self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)]\n\n # DISPARITY SMOOTHNESS\n with tf.variable_scope('smoothness'):\n self.disp_lc_smoothness = self.get_disparity_smoothness(self.disp_lc, self.left_pyramid)\n self.disp_cl_smoothness = self.get_disparity_smoothness(self.disp_cl, self.central_pyramid)\n\n self.disp_cr_smoothness = self.get_disparity_smoothness(self.disp_cr, self.central_pyramid)\n self.disp_rc_smoothness = self.get_disparity_smoothness(self.disp_rc, self.right_pyramid)\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n # IMAGE RECONSTRUCTION\n # L1\n self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left]\n\n self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right]\n\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl]\n\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr]\n\n # SSIM\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid[i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n\n self.ssim_right = [self.SSIM(self.right_est[i], self.right_pyramid[i]) for i in range(4)]\n self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n\n # WEIGTHED SUM\n self.image_loss_right = [self.params.alpha_image_loss * self.ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i] for i in range(4)]\n self.image_loss_left = [self.params.alpha_image_loss * self.ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i] for i in range(4)]\n self.image_loss_cl = 
[self.params.alpha_image_loss * self.ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cl[i] for i in range(4)]\n self.image_loss_cr = [self.params.alpha_image_loss * self.ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cr[i] for i in range(4)]\n\n self.image_loss = tf.add_n(self.image_loss_left + self.image_loss_cl + self.image_loss_right + self.image_loss_cr)\n\n self.image_loss_L = tf.add_n(self.image_loss_left + self.image_loss_cl)\n self.image_loss_R = tf.add_n(self.image_loss_right + self.image_loss_cr)\n\n\n # DISPARITY SMOOTHNESS\n self.disp_lc_loss = [tf.reduce_mean(tf.abs(self.disp_lc_smoothness[i])) / 2 ** i for i in range(4)]\n self.disp_cl_loss = [tf.reduce_mean(tf.abs(self.disp_cl_smoothness[i])) / 2 ** i for i in range(4)]\n\n self.disp_rc_loss = [tf.reduce_mean(tf.abs(self.disp_rc_smoothness[i])) / 2 ** i for i in range(4)]\n self.disp_cr_loss = [tf.reduce_mean(tf.abs(self.disp_cr_smoothness[i])) / 2 ** i for i in range(4)]\n\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.disp_cl_loss)\n self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.disp_cr_loss)\n\n\n # LR CONSISTENCY\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)]\n\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)]\n\n\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss)\n\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n\n # CENTRAL DISPARITY CONSISTENCY\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n\n # TOTAL LOSS\n self.total_loss = self.image_loss + self.params.disp_gradient_loss_weight * self.disp_gradient_loss + self.params.lr_loss_weight * self.lr_loss + self.central_disparity_loss\n\n self.total_loss_L = self.image_loss_L + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L\n self.total_loss_R = self.image_loss_R + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R\n\n def build_summaries(self):\n # SUMMARIES\n with tf.device('/cpu:0'):\n for i in range(4):\n tf.summary.scalar('ssim_loss_' + str(i), self.ssim_loss_left[i] + self.ssim_loss_cl[i] + self.ssim_loss_right[i] + self.ssim_loss_cr[i], collections=self.model_collection)\n tf.summary.scalar('l1_loss_' + str(i), self.l1_reconstruction_loss_left[i] + self.l1_reconstruction_loss_cl[i] + self.l1_reconstruction_loss_right[i] + self.l1_reconstruction_loss_cr[i], collections=self.model_collection)\n tf.summary.scalar('image_loss_' + str(i), self.image_loss_left[i] + self.image_loss_cl[i] + self.image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection)\n tf.summary.scalar('disp_gradient_loss_' + str(i), self.disp_lc_loss[i] + self.disp_cl_loss[i] + self.disp_rc_loss[i] + self.disp_cr_loss[i], collections=self.model_collection)\n 
tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.lr_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('total_loss_L', self.total_loss_L, collections= self.model_collection)\n tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection)\n tf.summary.scalar('central_disparity_loss', self.central_disparity_loss, collections=self.model_collection)\n tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_pyramid_' + str(i), self.left_pyramid[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('central_pyramid_' + str(i), self.central_pyramid[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_pyramid_' + str(i), self.right_pyramid[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)\n\n if self.params.full_summary:\n #tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_left_' + str(i), self.ssim_left[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_right_' + str(i), self.ssim_right[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_cl_' + str(i), self.ssim_cl[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_cr_' + str(i), self.ssim_cr[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('l1_left_' + str(i), self.l1_left[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('l1_cl_' + str(i), self.l1_cl[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection)\n\n if self.params.full_summary:\n tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection)\n tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection)\n tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection)", "step-ids": [ 8, 14, 15, 17, 18 ] }
[ 8, 14, 15, 17, 18 ]
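For reference, the photometric term assembled in build_losses above weights an SSIM score against an L1 reconstruction error at each pyramid scale. The sketch below redoes that blend in plain NumPy on toy values; the array shapes and the 0.85 weight are illustrative stand-ins for params.alpha_image_loss, and only the alpha * SSIM + (1 - alpha) * L1 form is taken from the model code.

import numpy as np

def photometric_loss(ssim_map, l1_map, alpha=0.85):
    # Mirrors one element of image_loss_*: a reduce_mean over each map,
    # then an alpha-weighted sum. alpha here is an assumed example value.
    ssim_loss = np.mean(ssim_map)
    l1_loss = np.mean(l1_map)
    return alpha * ssim_loss + (1 - alpha) * l1_loss

# Toy maps standing in for one scale of the image pyramids.
ssim_map = np.full((2, 4, 4, 3), 0.2)
l1_map = np.full((2, 4, 4, 3), 0.1)
print(photometric_loss(ssim_map, l1_map))   # 0.85*0.2 + 0.15*0.1 = 0.185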
from django.views.generic import ListView


class ExperimentList(ListView):
    pass
normal
{ "blob_id": "10990282c8aa0b9b26a69e451132ff37257acbc6", "index": 3331, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ExperimentList(ListView):\n pass\n", "step-3": "from django.views.generic import ListView\n\n\nclass ExperimentList(ListView):\n pass\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
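As it stands, ExperimentList subclasses Django's ListView without a model, queryset or template, so it cannot serve a request on its own. A hypothetical completion is sketched below; the Experiment model, template path and URL name are assumptions, nothing in the record defines them.

from django.urls import path

from .models import Experiment          # assumed model, not in the snippet
from .views import ExperimentList


class ConfiguredExperimentList(ExperimentList):
    # A ListView needs at least a model or a queryset to resolve objects.
    model = Experiment
    template_name = 'experiments/experiment_list.html'   # assumed template
    context_object_name = 'experiments'


urlpatterns = [
    path('experiments/', ConfiguredExperimentList.as_view(),
         name='experiment-list'),
]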
import xlrd
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.core import validators

from utils.views import render_to
from accounts.models import Account

from .models import ExternalSubscriber
from .forms import ExternalSubscriberUpload


def validate_email(value, row_number):
    error_message = _(u'Invalid e-mail address on "%d" line.')
    return validators.EmailValidator(
        validators.email_re,
        unicode(error_message % row_number),
        'invalid'
    )(value)


def upload_handler(file_obj, path_to_save):
    destination = open(path_to_save, 'wb+')
    for chunk in file_obj.chunks():
        destination.write(chunk)
    destination.close()


def get_externalsubscribers(file_obj):
    pass_count = 0
    fail_count = 0
    PATH = '/tmp/import_subscribers.xls'
    upload_handler(file_obj, PATH)
    sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
    for i in range(1, sheet.nrows):
        row = sheet.row(i)
        if not row[0].value:
            continue
        subscriber = {}
        subscriber['email'] = row[0].value
        try:
            validate_email(subscriber['email'].strip(), i)
            pass_count += 1
        except Exception as e:
            fail_count += 1
            #print e, u'"%s"' % subscriber['email']
            continue

        try:
            subscriber['first_name'] = row[1].value
        except IndexError:
            pass

        try:
            subscriber['last_name'] = row[2].value
        except IndexError:
            pass

        if not bool(Account.objects.filter(email=subscriber['email']).only('id')):
            obj, created = ExternalSubscriber.objects.get_or_create(
                email=subscriber['email'],
                defaults={
                    'first_name': subscriber.get('first_name'),
                    'last_name': subscriber.get('last_name'),
                }
            )
            if not created:
                for field in ['first_name', 'last_name']:
                    if subscriber.get(field) and\
                            getattr(obj, field) != subscriber.get(field):
                        setattr(obj, field, subscriber.get(field))
                obj.save()

    return pass_count, fail_count


@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
    if request.method == 'POST':
        form = ExternalSubscriberUpload(request.POST, request.FILES)
        if form.is_valid():
            passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
            messages.add_message(request, messages.INFO, _('Subscribers successfuly imported. %(passed)d added and %(failed)d failed ') % {'passed': passed, 'failed': failed})

            return redirect('admin:newsletter_externalsubscriber_changelist')
    else:
        form = ExternalSubscriberUpload()
    return {'form': form}
normal
{ "blob_id": "2ec41e02c95a270455c096e85829b7220eeda0c7", "index": 1317, "step-1": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\n<mask token>\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n 
PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _(\n 'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '\n ) % {'passed': passed, 'failed': failed})\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n", "step-4": "import xlrd\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core import validators\nfrom utils.views import render_to\nfrom accounts.models import Account\nfrom .models import ExternalSubscriber\nfrom .forms import ExternalSubscriberUpload\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, 
fail_count\n\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _(\n 'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '\n ) % {'passed': passed, 'failed': failed})\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n", "step-5": "import xlrd\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core import validators\n\nfrom utils.views import render_to\nfrom accounts.models import Account\n\nfrom .models import ExternalSubscriber\nfrom .forms import ExternalSubscriberUpload\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(\n validators.email_re,\n unicode(error_message % row_number),\n 'invalid'\n )(value)\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1,sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count+=1\n except Exception as e:\n fail_count+=1\n #print e, u'\"%s\"' % subscriber['email']\n continue\n\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n\n if not bool(Account.objects.filter(email=subscriber['email']).only('id')):\n obj, created = ExternalSubscriber.objects.get_or_create(\n email=subscriber['email'],\n defaults={\n 'first_name': subscriber.get('first_name'),\n 'last_name': subscriber.get('last_name'),\n }\n )\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and\\\n getattr(obj, field) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n\n return pass_count, fail_count\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _('Subscribers successfuly imported. %(passed)d added and %(failed)d failed ') % {'passed':passed, 'failed': failed})\n\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
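get_externalsubscribers above skips row 0 as a header and reads the e-mail from column 0 and the optional first/last name from columns 1 and 2, tolerating missing cells through the IndexError handlers. The sketch below builds such a file with xlwt purely to show the expected layout; the output path and the addresses are made up.

import xlwt

book = xlwt.Workbook()
sheet = book.add_sheet('subscribers')

rows = [
    ('email', 'first_name', 'last_name'),    # header row, skipped by the importer
    ('alice@example.com', 'Alice', 'Smith'),
    ('bob@example.com', 'Bob', ''),          # blank name cells are fine
]
for r, row in enumerate(rows):
    for c, value in enumerate(row):
        sheet.write(r, c, value)

book.save('/tmp/import_subscribers_example.xls')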
a = ['a', 'b', 'c', 'd', 'e']
print(';'.join(a))
normal
{ "blob_id": "a10403d7809b97c1bcdfa73224b8c365519cc456", "index": 7275, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(';'.join(a))\n", "step-3": "a = ['a', 'b', 'c', 'd', 'e']\nprint(';'.join(a))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import sys

def main():
    lines = [line.strip() for line in sys.stdin.readlines()]
    h = lines.index("")
    w = len(lines[0].split()[0])
    start = 0
    grids = set()
    while start < len(lines):
        grid = tuple(x.split()[0] for x in lines[start:start + h])
        if len(grid) == h:
            grids.add(grid)
        start += h + 1
    print >> sys.stderr, len(grids)
    for grid in grids:
        for line in grid:
            print line
        print

main()
normal
{ "blob_id": "6ef8a174dcce633b526ce7d6fdb6ceb11089b177", "index": 3652, "step-1": "import sys\n\ndef main():\n lines = [line.strip() for line in sys.stdin.readlines()]\n h = lines.index(\"\")\n w = len(lines[0].split()[0])\n start = 0\n grids = set()\n while start < len(lines):\n grid = tuple(x.split()[0] for x in lines[start:start + h])\n if len(grid) == h:\n grids.add(grid)\n start += h + 1\n print >> sys.stderr, len(grids)\n for grid in grids:\n for line in grid:\n print line\n print\n\nmain()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
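The script above expects equal-height grids on stdin, separated by blank lines, and keeps only the first whitespace-separated token of each line; unique grids go to stdout and their count to stderr. Below is a worked example of the same deduplication logic on a made-up input, restated with Python 3 prints for brevity (the grid contents are invented).

sample_lines = [
    '#.#', '...', '#.#',
    '',
    '#.#', '...', '#.#',     # duplicate of the first grid
    '',
    '..#', '#..', '.##',
    '',
]

h = sample_lines.index('')   # grid height inferred from the first blank line
grids = set()
start = 0
while start < len(sample_lines):
    grid = tuple(x.split()[0] for x in sample_lines[start:start + h])
    if len(grid) == h:
        grids.add(grid)
    start += h + 1

print(len(grids))            # 2 distinct grids survive
for grid in sorted(grids):
    print('\n'.join(grid) + '\n')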
<|reserved_special_token_0|>


class AtomExtensionGrammar(extension.ExtensionGrammar):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

<|reserved_special_token_1|>

<|reserved_special_token_0|>


class AtomExtensionGrammar(extension.ExtensionGrammar):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def __init__(self):
        super().__init__()
        self.app_context = 'Autumntastic'
        self.mappings = {}

<|reserved_special_token_1|>

<|reserved_special_token_0|>


class AtomExtensionGrammar(extension.ExtensionGrammar):
    activate = '{ctrl+alt+8}'
    search_chars = bu.merge_dicts(bu.OPERATORS, bu.ALPHABET, bu.CHAR_MAP)

    def __init__(self):
        super().__init__()
        self.app_context = 'Autumntastic'
        self.mappings = {}

<|reserved_special_token_1|>

from pynhost.grammars import extension
from pynhost.grammars import baseutils as bu


class AtomExtensionGrammar(extension.ExtensionGrammar):
    activate = '{ctrl+alt+8}'
    search_chars = bu.merge_dicts(bu.OPERATORS, bu.ALPHABET, bu.CHAR_MAP)

    def __init__(self):
        super().__init__()
        self.app_context = 'Autumntastic'
        self.mappings = {}
flexible
{ "blob_id": "ac5c6a534d5131438d9590b070e6b392d4ebed0c", "index": 9764, "step-1": "<mask token>\n\n\nclass AtomExtensionGrammar(extension.ExtensionGrammar):\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass AtomExtensionGrammar(extension.ExtensionGrammar):\n <mask token>\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.app_context = 'Autumntastic'\n self.mappings = {}\n", "step-3": "<mask token>\n\n\nclass AtomExtensionGrammar(extension.ExtensionGrammar):\n activate = '{ctrl+alt+8}'\n search_chars = bu.merge_dicts(bu.OPERATORS, bu.ALPHABET, bu.CHAR_MAP)\n\n def __init__(self):\n super().__init__()\n self.app_context = 'Autumntastic'\n self.mappings = {}\n", "step-4": "from pynhost.grammars import extension\nfrom pynhost.grammars import baseutils as bu\n\n\nclass AtomExtensionGrammar(extension.ExtensionGrammar):\n activate = '{ctrl+alt+8}'\n search_chars = bu.merge_dicts(bu.OPERATORS, bu.ALPHABET, bu.CHAR_MAP)\n\n def __init__(self):\n super().__init__()\n self.app_context = 'Autumntastic'\n self.mappings = {}\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
import numpy as np
import cPickle as pkl


data_l = []
data_path = "/home/marc/data/"
with open(data_path+'covtype.data') as fp:
    for line in fp:
        tmp_l = [ int(elem) for elem in line.split(',') ]
        data_l.append(tmp_l)

data = np.array(data_l)
np.random.shuffle(data)

quintil = data.shape[0]/5
train_x = data[:quintil*3, :-1]
train_y = (data[:quintil*3, -1]-1).reshape((-1,1)).astype(int)
valid_x = data[quintil*3:quintil*4, :-1]
valid_y = (data[quintil*3:quintil*4, -1]-1).reshape((-1,1)).astype(int)
test_x = data[quintil*4:quintil*5, :-1]
test_y = (data[quintil*4:quintil*5, -1]-1).reshape((-1,1)).astype(int)

np.equal(data[:,-1], np.ones(data[:,-1].shape)).sum()
np.equal(data[:,-1], np.ones(data[:,-1].shape)+1).sum()
np.equal(data[:,-1], np.ones(data[:,-1].shape)+2).sum()

dss = [train_x, train_y, valid_x, valid_y, test_x, test_y]
names = ["train_x", "train_y", "valid_x", "valid_y", "test_x", "test_y"]

for ds, name in zip(dss, names):
    f = open(data_path+"COV_"+name+".pkl", "wb")
    pkl.dump(ds, f)
normal
{ "blob_id": "c8975306473dda49be6c5f19f6663214ec7e7105", "index": 7655, "step-1": "import numpy as np\nimport cPickle as pkl\n\n\n\ndata_l = []\ndata_path = \"/home/marc/data/\"\nwith open(data_path+'covtype.data') as fp:\n for line in fp:\n\t\ttmp_l = [ int(elem) for elem in line.split(',') ]\n\t\tdata_l.append(tmp_l)\n\n\ndata = np.array(data_l)\nnp.random.shuffle(data)\n\nquintil = data.shape[0]/5\ntrain_x = data[:quintil*3, :-1]\ntrain_y = (data[:quintil*3, -1]-1).reshape((-1,1)).astype(int)\nvalid_x = data[quintil*3:quintil*4, :-1]\nvalid_y = (data[quintil*3:quintil*4, -1]-1).reshape((-1,1)).astype(int)\ntest_x = data[quintil*4:quintil*5, :-1]\ntest_y = (data[quintil*4:quintil*5, -1]-1).reshape((-1,1)).astype(int)\n\nnp.equal(data[:,-1],np.ones(data[:,-1].shape)).sum()\nnp.equal(data[:,-1],np.ones(data[:,-1].shape)+1).sum()\nnp.equal(data[:,-1],np.ones(data[:,-1].shape)+2).sum()\n\n\ndss = [train_x, train_y, valid_x, valid_y, test_x , test_y]\nnames = [\"train_x\", \"train_y\", \"valid_x\", \"valid_y\", \"test_x\", \"test_y\"]\n\nfor ds,name in zip(dss, names):\n\tf = open(data_path+\"COV_\"+name+\".pkl\", \"wb\")\n\tpkl.dump(ds,f)", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
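To use the splits written above, each COV_*.pkl file can simply be unpickled again; the class labels were already shifted to the 0-6 range before dumping. The read-back sketch below assumes the same data_path and the Python 2 cPickle module as the original script (under Python 3 it would be import pickle).

import cPickle as pkl

data_path = "/home/marc/data/"
names = ["train_x", "train_y", "valid_x", "valid_y", "test_x", "test_y"]

splits = {}
for name in names:
    with open(data_path + "COV_" + name + ".pkl", "rb") as f:
        splits[name] = pkl.load(f)

print(splits["train_x"].shape)   # (n_train, 54) feature matrix
print(splits["train_y"].shape)   # (n_train, 1) labels in 0..6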
#!/usr/bin/env python
"""
Plot EEG data.

Usage:
    plotting.py [options] [<file>]

Options:
    -h --help           Show this screen.
    --version           Show version.
    --center            Center the data before plotting
    --sample-index=N    Row index (indexed from one).
    --transpose         Transpose data.
    --xlim=lim          X-axis limits.

Data
----
ELECTRODES : dict
    Dictionary indexed by electrode name with 2D positions as values

References
----------
The five percent electrode system for high-resolution EEG and ERP
measurement, Robert Oostenveld, Peter Praamstra.

"""

from __future__ import absolute_import, division, print_function

from math import cos, pi, sin

import matplotlib.lines as lines
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms

import numpy as np

import pandas as pd

from scipy.interpolate import griddata


__all__ = ('ELECTRODES', 'MultiPlot', 'TopoPlot', 'topoplot')


ELECTRODES = {
    'AF3': (-0.25, 0.62), 'AF4': (0.25, 0.62),
    'AF7': (0.8 * cos(0.7 * pi), 0.8 * sin(0.7 * pi)),
    'AF8': (0.8 * cos(0.3 * pi), 0.8 * sin(0.3 * pi)),
    'AFz': (0, 0.6),
    'C1': (-0.2, 0), 'C2': (0.2, 0),
    'C3': (-0.4, 0), 'C4': (0.4, 0),
    'C5': (-0.6, 0), 'C6': (0.6, 0),
    'CP1': (-0.18, -0.2), 'CP2': (0.18, -0.2),
    'CP3': (-0.36, 0.4 * sin(1.17 * pi)), 'CP4': (0.36, 0.4 * sin(1.83 * pi)),
    'CP5': (0.6 * cos(1.12 * pi), 0.6 * sin(1.12 * pi)),
    'CP6': (0.6 * cos(1.88 * pi), 0.6 * sin(1.88 * pi)),
    'CPz': (0, -0.2),
    'Cz': (0, 0),
    'F1': (-0.18, 0.4), 'F2': (0.18, 0.4),
    'F3': (-0.35, 0.41), 'F4': (0.35, 0.41),
    'F5': (-0.5, 0.43), 'F6': (0.5, 0.43),
    'F7': (0.8 * cos(0.8 * pi), 0.8 * sin(0.8 * pi)),
    'F8': (0.8 * cos(0.2 * pi), 0.8 * sin(0.2 * pi)),
    'FC1': (-0.2, 0.21), 'FC2': (0.2, 0.21),
    'FC3': (-0.39, 0.22), 'FC4': (0.39, 0.22),
    'FC5': (-0.57, 0.23), 'FC6': (0.57, 0.23),
    'FCz': (0, 0.2),
    'FP1': (0.8 * cos(0.6 * pi), 0.8 * sin(0.6 * pi)),
    'FP2': (0.8 * cos(0.4 * pi), 0.8 * sin(0.4 * pi)),
    'Fpz': (0, 0.8),
    'FT7': (0.8 * cos(0.9 * pi), 0.8 * sin(0.9 * pi)),
    'FT8': (0.8 * cos(0.1 * pi), 0.8 * sin(0.1 * pi)),
    'Fz': (0, 0.4),
    'Iz': (0, -1),
    'Nz': (0, 1),
    'P1': (-0.18, -0.41), 'P2': (0.18, -0.41),
    'P3': (-0.35, -0.42), 'P4': (0.35, -0.42),
    'P5': (-0.5, -0.44), 'P6': (0.5, -0.44),
    'P7': (0.8 * cos(1.2 * pi), 0.8 * sin(1.2 * pi)),
    'P8': (0.8 * cos(1.8 * pi), 0.8 * sin(1.8 * pi)),
    'PO3': (-0.24, -0.62), 'PO4': (0.24, -0.62),
    'PO7': (0.8 * cos(1.3 * pi), 0.8 * sin(1.3 * pi)),
    'PO8': (0.8 * cos(1.7 * pi), 0.8 * sin(1.7 * pi)),
    'POz': (0, -0.6),
    'Pz': (0, -0.4),
    'O1': (0.8 * cos(1.4 * pi), 0.8 * sin(1.4 * pi)),
    'O2': (0.8 * cos(1.6 * pi), 0.8 * sin(1.6 * pi)),
    'Oz': (0, -0.8),
    'T7': (-0.8, 0), 'T8': (0.8, 0),
    'T9': (-1, 0), 'T10': (1, 0),
    'TP7': (0.8 * cos(1.1 * pi), 0.8 * sin(1.1 * pi)),
    'TP8':
(0.8 * cos(1.9 * pi), 0.8 * sin(1.9 * pi)), 'TP9': (cos(1.1 * pi), sin(1.1 * pi)), 'TP10': (cos(1.9 * pi), sin(1.9 * pi)), } class TopoPlot(object): """Topographic plot.""" def __init__(self, data=None, axes=None): """Setup defaults. Parameters ---------- data : Pandas.Series or dict Pandas Series with values indexed by electrodes. axes : matplotlib.axes.AxesSubplot object Axis object to render on. """ if axes is None: self.figure = plt.figure() axes = self.figure.gca() else: self.figure = axes.get_figure() self.axes = axes self.center = np.array((0, 0)) if isinstance(data, dict): self.data = pd.Series(data) elif isinstance(data, pd.Series): self.data = data elif data is None: self.data = None else: raise ValueError("Wrong type of value for 'data': {}".format( type(data))) @staticmethod def normalize_electrode_name(name): """Normalize electrode name. Parameters ---------- name : str Name of electrode to be normalized Examples -------- >>> TopoPlot.normalize_electrode_name('fpz') 'Fpz' >>> TopoPlot.normalize_electrode_name('AFZ') 'AFz' """ return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z') def draw_electrodes(self): """Draw electrodes.""" for electrode, position in ELECTRODES.items(): circle = plt.Circle(self.center + position, radius=0.04, fill=True, facecolor=(1, 1, 1)) self.axes.add_patch(circle) position = self.center + position self.axes.text(position[0], position[1], electrode, verticalalignment='center', horizontalalignment='center', size=6) def draw_head(self): """Draw outer head.""" circle = plt.Circle(self.center, radius=1, fill=False) self.axes.add_patch(circle) def draw_inner_head(self): """Draw inner head.""" circle = plt.Circle(self.center, radius=0.8, fill=False) self.axes.add_patch(circle) def draw_nose(self): """Draw nose.""" nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(0.1)], color=(0, 0, 0)) self.axes.add_line(nose) def draw_data(self, method='linear', number_of_contours=10): """Draw countours from provided data.""" if self.data is not None: # Coordinates for points to interpolate to xi, yi = np.mgrid[-1:1:100j, -1:1:100j] # Electrode positions for data to interpolate from points = [] for electrode in self.data.index: name = TopoPlot.normalize_electrode_name(electrode) points.append(ELECTRODES[name]) # Interpolate # TODO: Will not work with 2 electrodes. zi = griddata(points, self.data.values, (xi, yi), method=method) # Defaults if number_of_contours is None: number_of_contours = 10 # Draw plt.contourf(xi, yi, zi, number_of_contours) # TODO: center def draw(self, title=None, method='linear', number_of_contours=None): """Draw all components in topoplot including the data. Parameters ---------- title : str, optional Title to put on the plot methods : str, optional Interpolation method number_of_contours : int Number of contours in the colored plot. Examples -------- >>> import matplotlib.pyplot as plt >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4} >>> plt.ion() >>> topo_plot = TopoPlot(data) >>> topo_plot.draw() """ self.draw_head() self.draw_inner_head() self.draw_electrodes() self.draw_nose() self.draw_data(method=method, number_of_contours=number_of_contours) self.axes.axis((-1.2, 1.2, -1.2, 1.2)) self.axes.axis('equal') if title is not None: self.axes.set_title(title) class MultiPlot(TopoPlot): """Multiple plots organized topographically. References ---------- http://www.fieldtriptoolbox.org/reference/ft_multiploter """ def __init__(self, data=None, axes=None, xlim=None, ylim=None): """Setup defaults. 
Parameters ---------- data : Pandas.DataFrame Pandas DataFrame with values indexed by electrodes. axes : matplotlib.axes.AxesSubplot object Axis object to render on. """ if axes is None: self.figure = plt.figure() axes = self.figure.gca() else: self.figure = axes.get_figure() self.axes = axes # Contains a list of axes used to plot data data from individual # electrodes self._subaxes = [] self.xlim = xlim self.ylim = ylim self.center = np.array((0, 0)) if isinstance(data, pd.DataFrame): self.data = data elif data is None: self.data = None else: raise ValueError("Wrong type of value for 'data': {}".format( type(data))) def add_subplot_axes(self, ax, rect, axis_bgcolor=None): """Add subaxes to currect specified axes. References ---------- Pablo https://stackoverflow.com/users/2309442/pablo Pablo's answer to "Embedding small plots inside subplots in matplotlib" https://stackoverflow.com/questions/17458580/ """ # Modified from # https://stackoverflow.com/questions/17458580/ box = ax.get_position() width, height = box.width, box.height subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3])] subaxes_display_coords = ax.transData.transform(subaxes_box) trans_figure = self.figure.transFigure.inverted() subaxes_figure_coords = trans_figure.transform(subaxes_display_coords) x, y = subaxes_figure_coords[0, :] width, height = (subaxes_figure_coords[1, :] - subaxes_figure_coords[0, :]) subaxes = self.figure.add_axes( [x, y, width, height], axis_bgcolor=axis_bgcolor) x_labelsize = subaxes.get_xticklabels()[0].get_size() y_labelsize = subaxes.get_yticklabels()[0].get_size() x_labelsize *= rect[2] ** 0.5 y_labelsize *= rect[3] ** 0.5 subaxes.xaxis.set_tick_params(labelsize=x_labelsize) subaxes.yaxis.set_tick_params(labelsize=y_labelsize) return subaxes def draw_data(self, type='plot', width=None, height=None, xlim=None, ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'): """Draw data. 
Parameters ---------- type : 'plot', 'spectrogram', optional Type of plot xlim : 2-tuple of floats, optional X-axis limits ylim : 2-tuple of floats, optional Y-axis limits vmin : float, optional Minimum value for spectrogram colormap vmax : float, optional Maximum value for spectrogram colormap axis : bool, optional Determine whether the axis should be shown """ if self.data is not None: if ylim is None: if self.ylim is None and type != 'spectrogram': ylim = self.auto_ylim(xlim, yscale=yscale) else: ylim = self.ylim if xlim is None: xlim = self.xlim if vmin is None: vmin = 0 # Determine a suitable width for subaxes number_of_electrodes = len([ electrode for electrode in self.data.columns if electrode in ELECTRODES]) if width is None: if number_of_electrodes > 32: width = 0.15 else: width = 0.25 if height is None: height = 0.25 for electrode in self.data.columns: if electrode in ELECTRODES: # Axes and position x, y = ELECTRODES[electrode] subaxes = self.add_subplot_axes( self.axes, [x - width / 2, y - height / 2, width, height], axis_bgcolor='w') # Actual data plot if type == 'plot': self.data.ix[:, electrode].plot( ax=subaxes, xlim=xlim, ylim=ylim) if not axis: # x-axis trans = transforms.blended_transform_factory( subaxes.transAxes, subaxes.transData) line = lines.Line2D( (0, 1), (0, 0), transform=trans, color=(0, 0, 0)) subaxes.add_line(line) trans = transforms.blended_transform_factory( subaxes.transAxes, subaxes.transAxes) line = lines.Line2D( (0, 0), (0, 1), transform=trans, color=(0, 0, 0)) subaxes.add_line(line) elif type == 'spectrogram': spectrum, frequencies, midpoints, axes = plt.specgram( self.data.ix[:, electrode], Fs=self.data.sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes) # Adjust axis around spectrogram image. if xlim is None: xlim = midpoints[0], midpoints[-1] subaxes.set_xlim(xlim) if ylim is None: ylim = frequencies[0], frequencies[-1] subaxes.set_ylim(ylim) else: raise ValueError("Wrong value for 'type' argument") if not axis: subaxes.set_axis_off() # Annotation # http://matplotlib.org/users/transforms_tutorial.html subaxes.text(0.5, 0.95, electrode, transform=subaxes.transAxes, fontweight='bold', va='top', ha='center') subaxes.set_yticklabels([]) subaxes.set_xticklabels([]) self._subaxes.append(subaxes) @property def xlim(self): """Return xlim for subplots.""" lim = [ax.get_xlim() for ax in self._subaxes] if lim == []: lim = None return lim @xlim.setter def xlim(self, left=None, right=None): """Set x-axis limits on all subplots.""" for ax in self._subaxes: ax.set_xlim(left, right) self.figure.canvas.draw() @property def ylim(self): """Return ylim for subplots.""" lim = [ax.get_ylim() for ax in self._subaxes] if lim == []: lim = None return lim @ylim.setter def ylim(self, bottom=None, top=None): """Set y-axis limits on all subplots.""" for ax in self._subaxes: ax.set_ylim(bottom, top) self.figure.canvas.draw() @property def yscale(self): """Return yscale for subplots.""" yscales = [ax.get_yscale() for ax in self._subaxes] return yscales @yscale.setter def yscale(self, value='linear'): """Set y-axis limits on all subplots.""" for ax in self._subaxes: ax.set_yscale(value) self.figure.canvas.draw() def auto_ylim(self, xlim=None, yscale='linear'): """Return an estimate for a good ylim. Parameters ---------- xlim : 2-tuple, optional Limits in (the index of) the data from where the scaling should be computed. yscale : linear or log, optional Scaling of y-axis. 
""" electrodes = [col for col in self.data.columns if col in ELECTRODES] if xlim is None: data = self.data.ix[:, electrodes] else: indices = ((self.data.index >= xlim[0]) & (self.data.index <= xlim[1])) data = self.data.ix[indices, electrodes] min_data = data.min().min() max_data = data.max().max() abs_max = max(abs(min_data), max_data) if yscale == 'linear' or yscale == 'symlog': if min_data >= 0: ylim = 0, max_data else: ylim = -abs_max, abs_max elif yscale == 'log': if min_data > 0: ylim = min_data, max_data else: pseudo_zero = abs_max * 10 ** -5 ylim = pseudo_zero, abs_max else: raise ValueError('Wrong value to yscale: {}'.format(yscale)) return ylim def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'): """Draw all components in multiplot including the data. Parameters ---------- title : str, optional Title to put on the plot xlim : tuple of floats, optional X-axis limits used for each individual plots ylim : tuple of floats, optional Y-axis limits used for each individual plots """ self.axes.axis((-1.2, 1.2, -1.2, 1.2)) self.draw_head() self.draw_inner_head() self.draw_nose() self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=vmax, axis=axis, yscale=yscale) if title is not None: self.axes.set_title(title) self.yscale = yscale def topoplot(data=None, axes=None, method='linear', number_of_contours=10, title=None, xlim=None, ylim=None): """Plot topographic map of the scalp in 2-D circular view. Draw the colored scalp map based on data in a Pandas Series where the values are indexed according to electrode name. Parameters ---------- data : pandas.Series or pandas.DataFrame, optional Series with values and indexed by electrode names. methods : str, optional Interpolation method number_of_contours : int Number of contours in the colored plot. xlim : 2-tuple of floats, optional Limits of x-axis in multiplot ylim : 2-tuple of floats, optional Limits of y-axis in multiplot References ---------- https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm Examples -------- >>> import matplotlib.pyplot as plt >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4} >>> plt.ion() >>> topo_plot = topoplot(data) """ if isinstance(data, pd.Series) or isinstance(data, dict) or data is None: topo_plot = TopoPlot(data=data, axes=axes) topo_plot.draw(title=title, method=method, number_of_contours=number_of_contours) return topo_plot elif isinstance(data, pd.DataFrame): multi_plot = MultiPlot(data=data, axes=axes) multi_plot.draw(title=title, xlim=xlim, ylim=ylim) return multi_plot def show(): """Show plot.""" plt.show() def main(args): """Handle command-line interface to topographic plot.""" xlim = args['--xlim'] if args['--xlim'] is not None: xlim = [float(lim) for lim in xlim.split(',')] if args['<file>'] is None: topoplot() else: filename = args['<file>'] if filename.lower().endswith('.csv'): from .core import read_csv df = read_csv(filename, index_col=0) if args['--transpose']: df = df.T if args['--sample-index'] is None: if args['--center'] is not None: df = df.center() topoplot(df, xlim=xlim) else: sample_index = int(args['--sample-index']) series = df.iloc[sample_index - 1, :] topoplot(series) else: exit('Only csv files handled') plt.show() if __name__ == '__main__': from docopt import docopt main(docopt(__doc__))
None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TopoPlot(object):\n \"\"\"Topographic plot.\"\"\"\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be 
normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n 
https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax 
in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass TopoPlot(object):\n \"\"\"Topographic plot.\"\"\"\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 
'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), 
(rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = 
[ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\ndef topoplot(data=None, axes=None, method='linear', number_of_contours=10,\n title=None, xlim=None, ylim=None):\n \"\"\"Plot topographic map of the scalp in 2-D circular view.\n\n Draw the colored scalp map based on data in a Pandas Series where\n the values are indexed according to electrode name.\n\n Parameters\n ----------\n data : pandas.Series or pandas.DataFrame, optional\n Series with values and indexed by electrode names.\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n xlim : 2-tuple of floats, optional\n Limits of x-axis in multiplot\n ylim : 2-tuple of floats, optional\n Limits of y-axis in multiplot\n\n References\n ----------\n https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py\n\n http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = topoplot(data)\n\n \"\"\"\n if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:\n topo_plot = TopoPlot(data=data, axes=axes)\n topo_plot.draw(title=title, method=method, number_of_contours=\n number_of_contours)\n 
return topo_plot\n elif isinstance(data, pd.DataFrame):\n multi_plot = MultiPlot(data=data, axes=axes)\n multi_plot.draw(title=title, xlim=xlim, ylim=ylim)\n return multi_plot\n\n\ndef show():\n \"\"\"Show plot.\"\"\"\n plt.show()\n\n\n<mask token>\n", "step-5": "#!/usr/bin/env python\r\n\"\"\"\r\nPlot EEG data.\r\n\r\nUsage:\r\n plotting.py [options] [<file>]\r\n\r\nOptions:\r\n -h --help Show this screen.\r\n --version Show version.\r\n --center Center the data before plotting\r\n --sample-index=N Row index (indexed from one).\r\n --transpose Transpose data.\r\n --xlim=lim X-axis limits.\r\n\r\nData\r\n----\r\nELECTRODES : dict\r\n Dictionary indexed by electrode name with 2D positions as values\r\n\r\nReferences\r\n----------\r\nThe five percent electrode system for high-resolution EEG and ERP\r\nmeasurement, Robert Oostenveld, Peter Praamstra.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nfrom math import cos, pi, sin\r\n\r\nimport matplotlib.lines as lines\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.transforms as transforms\r\n\r\nimport numpy as np\r\n\r\nimport pandas as pd\r\n\r\nfrom scipy.interpolate import griddata\r\n\r\n\r\n__all__ = ('ELECTRODES', 'MultiPlot', 'TopoPlot', 'topoplot')\r\n\r\n\r\nELECTRODES = {\r\n 'AF3': (-0.25, 0.62),\r\n 'AF4': (0.25, 0.62),\r\n 'AF7': (0.8 * cos(0.7 * pi), 0.8 * sin(0.7 * pi)),\r\n 'AF8': (0.8 * cos(0.3 * pi), 0.8 * sin(0.3 * pi)),\r\n 'AFz': (0, 0.6),\r\n 'C1': (-0.2, 0),\r\n 'C2': (0.2, 0),\r\n 'C3': (-0.4, 0),\r\n 'C4': (0.4, 0),\r\n 'C5': (-0.6, 0),\r\n 'C6': (0.6, 0),\r\n 'CP1': (-0.18, -0.2),\r\n 'CP2': (0.18, -0.2),\r\n 'CP3': (-0.36, 0.4 * sin(1.17 * pi)),\r\n 'CP4': (0.36, 0.4 * sin(1.83 * pi)),\r\n 'CP5': (0.6 * cos(1.12 * pi), 0.6 * sin(1.12 * pi)),\r\n 'CP6': (0.6 * cos(1.88 * pi), 0.6 * sin(1.88 * pi)),\r\n 'CPz': (0, -0.2),\r\n 'Cz': (0, 0),\r\n 'F1': (-0.18, 0.4),\r\n 'F2': (0.18, 0.4),\r\n 'F3': (-0.35, 0.41),\r\n 'F4': (0.35, 0.41),\r\n 'F5': (-0.5, 0.43),\r\n 'F6': (0.5, 0.43),\r\n 'F7': (0.8 * cos(0.8 * pi), 0.8 * sin(0.8 * pi)),\r\n 'F8': (0.8 * cos(0.2 * pi), 0.8 * sin(0.2 * pi)),\r\n 'FC1': (-0.2, 0.21),\r\n 'FC2': (0.2, 0.21),\r\n 'FC3': (-0.39, 0.22),\r\n 'FC4': (0.39, 0.22),\r\n 'FC5': (-0.57, 0.23),\r\n 'FC6': (0.57, 0.23),\r\n 'FCz': (0, 0.2),\r\n 'FP1': (0.8 * cos(0.6 * pi), 0.8 * sin(0.6 * pi)),\r\n 'FP2': (0.8 * cos(0.4 * pi), 0.8 * sin(0.4 * pi)),\r\n 'Fpz': (0, 0.8),\r\n 'FT7': (0.8 * cos(0.9 * pi), 0.8 * sin(0.9 * pi)),\r\n 'FT8': (0.8 * cos(0.1 * pi), 0.8 * sin(0.1 * pi)),\r\n 'Fz': (0, 0.4),\r\n 'Iz': (0, -1),\r\n 'Nz': (0, 1),\r\n 'P1': (-0.18, -0.41),\r\n 'P2': (0.18, -0.41),\r\n 'P3': (-0.35, -0.42),\r\n 'P4': (0.35, -0.42),\r\n 'P5': (-0.5, -0.44),\r\n 'P6': (0.5, -0.44),\r\n 'P7': (0.8 * cos(1.2 * pi), 0.8 * sin(1.2 * pi)),\r\n 'P8': (0.8 * cos(1.8 * pi), 0.8 * sin(1.8 * pi)),\r\n 'PO3': (-0.24, -0.62),\r\n 'PO4': (0.24, -0.62),\r\n 'PO7': (0.8 * cos(1.3 * pi), 0.8 * sin(1.3 * pi)),\r\n 'PO8': (0.8 * cos(1.7 * pi), 0.8 * sin(1.7 * pi)),\r\n 'POz': (0, -0.6),\r\n 'Pz': (0, -0.4),\r\n 'O1': (0.8 * cos(1.4 * pi), 0.8 * sin(1.4 * pi)),\r\n 'O2': (0.8 * cos(1.6 * pi), 0.8 * sin(1.6 * pi)),\r\n 'Oz': (0, -0.8),\r\n 'T7': (-0.8, 0),\r\n 'T8': (0.8, 0),\r\n 'T9': (-1, 0),\r\n 'T10': (1, 0),\r\n 'TP7': (0.8 * cos(1.1 * pi), 0.8 * sin(1.1 * pi)),\r\n 'TP8': (0.8 * cos(1.9 * pi), 0.8 * sin(1.9 * pi)),\r\n 'TP9': (cos(1.1 * pi), sin(1.1 * pi)),\r\n 'TP10': (cos(1.9 * pi), sin(1.9 * pi)),\r\n}\r\n\r\n\r\nclass TopoPlot(object):\r\n \"\"\"Topographic 
plot.\"\"\"\r\n\r\n def __init__(self, data=None, axes=None):\r\n \"\"\"Setup defaults.\r\n\r\n Parameters\r\n ----------\r\n data : Pandas.Series or dict\r\n Pandas Series with values indexed by electrodes.\r\n axes : matplotlib.axes.AxesSubplot object\r\n Axis object to render on.\r\n\r\n \"\"\"\r\n if axes is None:\r\n self.figure = plt.figure()\r\n axes = self.figure.gca()\r\n else:\r\n self.figure = axes.get_figure()\r\n self.axes = axes\r\n self.center = np.array((0, 0))\r\n if isinstance(data, dict):\r\n self.data = pd.Series(data)\r\n elif isinstance(data, pd.Series):\r\n self.data = data\r\n elif data is None:\r\n self.data = None\r\n else:\r\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\r\n type(data)))\r\n\r\n @staticmethod\r\n def normalize_electrode_name(name):\r\n \"\"\"Normalize electrode name.\r\n\r\n Parameters\r\n ----------\r\n name : str\r\n Name of electrode to be normalized\r\n\r\n Examples\r\n --------\r\n >>> TopoPlot.normalize_electrode_name('fpz')\r\n 'Fpz'\r\n\r\n >>> TopoPlot.normalize_electrode_name('AFZ')\r\n 'AFz'\r\n\r\n \"\"\"\r\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\r\n\r\n def draw_electrodes(self):\r\n \"\"\"Draw electrodes.\"\"\"\r\n for electrode, position in ELECTRODES.items():\r\n circle = plt.Circle(self.center + position,\r\n radius=0.04, fill=True,\r\n facecolor=(1, 1, 1))\r\n self.axes.add_patch(circle)\r\n position = self.center + position\r\n self.axes.text(position[0], position[1], electrode,\r\n verticalalignment='center',\r\n horizontalalignment='center',\r\n size=6)\r\n\r\n def draw_head(self):\r\n \"\"\"Draw outer head.\"\"\"\r\n circle = plt.Circle(self.center, radius=1, fill=False)\r\n self.axes.add_patch(circle)\r\n\r\n def draw_inner_head(self):\r\n \"\"\"Draw inner head.\"\"\"\r\n circle = plt.Circle(self.center, radius=0.8, fill=False)\r\n self.axes.add_patch(circle)\r\n\r\n def draw_nose(self):\r\n \"\"\"Draw nose.\"\"\"\r\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)],\r\n [cos(-0.1), 1.1, cos(0.1)],\r\n color=(0, 0, 0))\r\n self.axes.add_line(nose)\r\n\r\n def draw_data(self, method='linear', number_of_contours=10):\r\n \"\"\"Draw countours from provided data.\"\"\"\r\n if self.data is not None:\r\n # Coordinates for points to interpolate to\r\n xi, yi = np.mgrid[-1:1:100j, -1:1:100j]\r\n\r\n # Electrode positions for data to interpolate from\r\n points = []\r\n for electrode in self.data.index:\r\n name = TopoPlot.normalize_electrode_name(electrode)\r\n points.append(ELECTRODES[name])\r\n\r\n # Interpolate\r\n # TODO: Will not work with 2 electrodes.\r\n zi = griddata(points, self.data.values, (xi, yi), method=method)\r\n\r\n # Defaults\r\n if number_of_contours is None:\r\n number_of_contours = 10\r\n\r\n # Draw\r\n plt.contourf(xi, yi, zi, number_of_contours)\r\n\r\n # TODO: center\r\n\r\n def draw(self, title=None, method='linear', number_of_contours=None):\r\n \"\"\"Draw all components in topoplot including the data.\r\n\r\n Parameters\r\n ----------\r\n title : str, optional\r\n Title to put on the plot\r\n methods : str, optional\r\n Interpolation method\r\n number_of_contours : int\r\n Number of contours in the colored plot.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\r\n >>> plt.ion()\r\n >>> topo_plot = TopoPlot(data)\r\n >>> topo_plot.draw()\r\n\r\n \"\"\"\r\n self.draw_head()\r\n self.draw_inner_head()\r\n self.draw_electrodes()\r\n self.draw_nose()\r\n self.draw_data(method=method, 
number_of_contours=number_of_contours)\r\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\r\n self.axes.axis('equal')\r\n if title is not None:\r\n self.axes.set_title(title)\r\n\r\n\r\nclass MultiPlot(TopoPlot):\r\n \"\"\"Multiple plots organized topographically.\r\n\r\n References\r\n ----------\r\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\r\n\r\n \"\"\"\r\n\r\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\r\n \"\"\"Setup defaults.\r\n\r\n Parameters\r\n ----------\r\n data : Pandas.DataFrame\r\n Pandas DataFrame with values indexed by electrodes.\r\n axes : matplotlib.axes.AxesSubplot object\r\n Axis object to render on.\r\n\r\n \"\"\"\r\n if axes is None:\r\n self.figure = plt.figure()\r\n axes = self.figure.gca()\r\n else:\r\n self.figure = axes.get_figure()\r\n self.axes = axes\r\n\r\n # Contains a list of axes used to plot data data from individual\r\n # electrodes\r\n self._subaxes = []\r\n\r\n self.xlim = xlim\r\n self.ylim = ylim\r\n\r\n self.center = np.array((0, 0))\r\n\r\n if isinstance(data, pd.DataFrame):\r\n self.data = data\r\n elif data is None:\r\n self.data = None\r\n else:\r\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\r\n type(data)))\r\n\r\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\r\n \"\"\"Add subaxes to currect specified axes.\r\n\r\n References\r\n ----------\r\n Pablo https://stackoverflow.com/users/2309442/pablo\r\n\r\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\r\n https://stackoverflow.com/questions/17458580/\r\n\r\n \"\"\"\r\n # Modified from\r\n # https://stackoverflow.com/questions/17458580/\r\n box = ax.get_position()\r\n width, height = box.width, box.height\r\n subaxes_box = [(rect[0], rect[1]),\r\n (rect[0] + rect[2], rect[1] + rect[3])]\r\n subaxes_display_coords = ax.transData.transform(subaxes_box)\r\n trans_figure = self.figure.transFigure.inverted()\r\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\r\n x, y = subaxes_figure_coords[0, :]\r\n width, height = (subaxes_figure_coords[1, :] -\r\n subaxes_figure_coords[0, :])\r\n subaxes = self.figure.add_axes(\r\n [x, y, width, height], axis_bgcolor=axis_bgcolor)\r\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\r\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\r\n x_labelsize *= rect[2] ** 0.5\r\n y_labelsize *= rect[3] ** 0.5\r\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\r\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\r\n return subaxes\r\n\r\n def draw_data(self, type='plot', width=None, height=None,\r\n xlim=None, ylim=None,\r\n vmin=None, vmax=None,\r\n axis=False, yscale='linear'):\r\n \"\"\"Draw data.\r\n\r\n Parameters\r\n ----------\r\n type : 'plot', 'spectrogram', optional\r\n Type of plot\r\n xlim : 2-tuple of floats, optional\r\n X-axis limits\r\n ylim : 2-tuple of floats, optional\r\n Y-axis limits\r\n vmin : float, optional\r\n Minimum value for spectrogram colormap\r\n vmax : float, optional\r\n Maximum value for spectrogram colormap\r\n axis : bool, optional\r\n Determine whether the axis should be shown\r\n\r\n \"\"\"\r\n if self.data is not None:\r\n\r\n if ylim is None:\r\n if self.ylim is None and type != 'spectrogram':\r\n ylim = self.auto_ylim(xlim, yscale=yscale)\r\n else:\r\n ylim = self.ylim\r\n\r\n if xlim is None:\r\n xlim = self.xlim\r\n\r\n if vmin is None:\r\n vmin = 0\r\n\r\n # Determine a suitable width for subaxes\r\n number_of_electrodes = len([\r\n electrode\r\n for electrode in self.data.columns\r\n if electrode 
in ELECTRODES])\r\n if width is None:\r\n if number_of_electrodes > 32:\r\n width = 0.15\r\n else:\r\n width = 0.25\r\n if height is None:\r\n height = 0.25\r\n\r\n for electrode in self.data.columns:\r\n if electrode in ELECTRODES:\r\n\r\n # Axes and position\r\n x, y = ELECTRODES[electrode]\r\n subaxes = self.add_subplot_axes(\r\n self.axes,\r\n [x - width / 2, y - height / 2, width, height],\r\n axis_bgcolor='w')\r\n\r\n # Actual data plot\r\n if type == 'plot':\r\n self.data.ix[:, electrode].plot(\r\n ax=subaxes, xlim=xlim, ylim=ylim)\r\n\r\n if not axis:\r\n # x-axis\r\n trans = transforms.blended_transform_factory(\r\n subaxes.transAxes, subaxes.transData)\r\n line = lines.Line2D(\r\n (0, 1), (0, 0),\r\n transform=trans, color=(0, 0, 0))\r\n subaxes.add_line(line)\r\n\r\n trans = transforms.blended_transform_factory(\r\n subaxes.transAxes, subaxes.transAxes)\r\n line = lines.Line2D(\r\n (0, 0), (0, 1),\r\n transform=trans, color=(0, 0, 0))\r\n subaxes.add_line(line)\r\n\r\n elif type == 'spectrogram':\r\n spectrum, frequencies, midpoints, axes = plt.specgram(\r\n self.data.ix[:, electrode],\r\n Fs=self.data.sampling_rate,\r\n vmin=vmin,\r\n vmax=vmax,\r\n axes=subaxes)\r\n\r\n # Adjust axis around spectrogram image.\r\n if xlim is None:\r\n xlim = midpoints[0], midpoints[-1]\r\n subaxes.set_xlim(xlim)\r\n if ylim is None:\r\n ylim = frequencies[0], frequencies[-1]\r\n subaxes.set_ylim(ylim)\r\n\r\n else:\r\n raise ValueError(\"Wrong value for 'type' argument\")\r\n\r\n if not axis:\r\n subaxes.set_axis_off()\r\n\r\n # Annotation\r\n # http://matplotlib.org/users/transforms_tutorial.html\r\n subaxes.text(0.5, 0.95, electrode,\r\n transform=subaxes.transAxes,\r\n fontweight='bold', va='top', ha='center')\r\n subaxes.set_yticklabels([])\r\n subaxes.set_xticklabels([])\r\n\r\n self._subaxes.append(subaxes)\r\n\r\n @property\r\n def xlim(self):\r\n \"\"\"Return xlim for subplots.\"\"\"\r\n lim = [ax.get_xlim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim\r\n\r\n @xlim.setter\r\n def xlim(self, left=None, right=None):\r\n \"\"\"Set x-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_xlim(left, right)\r\n self.figure.canvas.draw()\r\n\r\n @property\r\n def ylim(self):\r\n \"\"\"Return ylim for subplots.\"\"\"\r\n lim = [ax.get_ylim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim\r\n\r\n @ylim.setter\r\n def ylim(self, bottom=None, top=None):\r\n \"\"\"Set y-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_ylim(bottom, top)\r\n self.figure.canvas.draw()\r\n\r\n @property\r\n def yscale(self):\r\n \"\"\"Return yscale for subplots.\"\"\"\r\n yscales = [ax.get_yscale() for ax in self._subaxes]\r\n return yscales\r\n\r\n @yscale.setter\r\n def yscale(self, value='linear'):\r\n \"\"\"Set y-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_yscale(value)\r\n self.figure.canvas.draw()\r\n\r\n def auto_ylim(self, xlim=None, yscale='linear'):\r\n \"\"\"Return an estimate for a good ylim.\r\n\r\n Parameters\r\n ----------\r\n xlim : 2-tuple, optional\r\n Limits in (the index of) the data from where the scaling should be\r\n computed.\r\n yscale : linear or log, optional\r\n Scaling of y-axis.\r\n\r\n \"\"\"\r\n electrodes = [col for col in self.data.columns\r\n if col in ELECTRODES]\r\n if xlim is None:\r\n data = self.data.ix[:, electrodes]\r\n else:\r\n indices = ((self.data.index >= xlim[0]) &\r\n (self.data.index <= xlim[1]))\r\n data = self.data.ix[indices, 
electrodes]\r\n min_data = data.min().min()\r\n max_data = data.max().max()\r\n abs_max = max(abs(min_data), max_data)\r\n if yscale == 'linear' or yscale == 'symlog':\r\n if min_data >= 0:\r\n ylim = 0, max_data\r\n else:\r\n ylim = -abs_max, abs_max\r\n elif yscale == 'log':\r\n if min_data > 0:\r\n ylim = min_data, max_data\r\n else:\r\n pseudo_zero = abs_max * 10 ** -5\r\n ylim = pseudo_zero, abs_max\r\n else:\r\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\r\n return ylim\r\n\r\n def draw(self, type='plot', title=None, xlim=None, ylim=None,\r\n vmin=None, vmax=None,\r\n axis=False, yscale='linear'):\r\n \"\"\"Draw all components in multiplot including the data.\r\n\r\n Parameters\r\n ----------\r\n title : str, optional\r\n Title to put on the plot\r\n xlim : tuple of floats, optional\r\n X-axis limits used for each individual plots\r\n ylim : tuple of floats, optional\r\n Y-axis limits used for each individual plots\r\n\r\n \"\"\"\r\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\r\n self.draw_head()\r\n self.draw_inner_head()\r\n self.draw_nose()\r\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin,\r\n vmax=vmax, axis=axis, yscale=yscale)\r\n if title is not None:\r\n self.axes.set_title(title)\r\n self.yscale = yscale\r\n\r\n\r\ndef topoplot(data=None, axes=None, method='linear', number_of_contours=10,\r\n title=None, xlim=None, ylim=None):\r\n \"\"\"Plot topographic map of the scalp in 2-D circular view.\r\n\r\n Draw the colored scalp map based on data in a Pandas Series where\r\n the values are indexed according to electrode name.\r\n\r\n Parameters\r\n ----------\r\n data : pandas.Series or pandas.DataFrame, optional\r\n Series with values and indexed by electrode names.\r\n methods : str, optional\r\n Interpolation method\r\n number_of_contours : int\r\n Number of contours in the colored plot.\r\n xlim : 2-tuple of floats, optional\r\n Limits of x-axis in multiplot\r\n ylim : 2-tuple of floats, optional\r\n Limits of y-axis in multiplot\r\n\r\n References\r\n ----------\r\n https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py\r\n\r\n http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\r\n >>> plt.ion()\r\n >>> topo_plot = topoplot(data)\r\n\r\n \"\"\"\r\n if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:\r\n topo_plot = TopoPlot(data=data, axes=axes)\r\n topo_plot.draw(title=title, method=method,\r\n number_of_contours=number_of_contours)\r\n return topo_plot\r\n elif isinstance(data, pd.DataFrame):\r\n multi_plot = MultiPlot(data=data, axes=axes)\r\n multi_plot.draw(title=title, xlim=xlim, ylim=ylim)\r\n return multi_plot\r\n\r\n\r\ndef show():\r\n \"\"\"Show plot.\"\"\"\r\n plt.show()\r\n\r\n\r\ndef main(args):\r\n \"\"\"Handle command-line interface to topographic plot.\"\"\"\r\n xlim = args['--xlim']\r\n if args['--xlim'] is not None:\r\n xlim = [float(lim) for lim in xlim.split(',')]\r\n\r\n if args['<file>'] is None:\r\n topoplot()\r\n else:\r\n filename = args['<file>']\r\n if filename.lower().endswith('.csv'):\r\n from .core import read_csv\r\n\r\n df = read_csv(filename, index_col=0)\r\n if args['--transpose']:\r\n df = df.T\r\n if args['--sample-index'] is None:\r\n if args['--center'] is not None:\r\n df = df.center()\r\n topoplot(df, xlim=xlim)\r\n else:\r\n sample_index = int(args['--sample-index'])\r\n series = df.iloc[sample_index - 1, :]\r\n topoplot(series)\r\n else:\r\n exit('Only csv 
files handled')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n from docopt import docopt\r\n\r\n main(docopt(__doc__))\r\n", "step-ids": [ 19, 22, 23, 25, 30 ] }
[ 19, 22, 23, 25, 30 ]
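The record above rebuilds, step by step, an EEG plotting module with two classes: TopoPlot, which interpolates per-electrode values over a 2-D head outline, and MultiPlot, which places a small time-series or spectrogram axes at each electrode position. Its code relies on the older pandas .ix indexer and matplotlib's axis_bgcolor keyword, both removed from current releases, so running it today means switching to .loc/.iloc and facecolor. Below is a minimal, self-contained sketch of just the interpolation step that TopoPlot.draw_data performs, assuming scipy and matplotlib are installed; the electrode coordinates are the C3, C4 and Oz entries from the record's ELECTRODES table, and the values are made up.

# Illustrative sketch (not part of the dataset row): interpolate scattered
# per-electrode values onto a regular grid, as TopoPlot.draw_data does.
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata

points = [(-0.4, 0.0), (0.4, 0.0), (0.0, -0.8)]   # C3, C4, Oz positions
values = [1.0, -2.0, 0.5]                          # made-up readings

xi, yi = np.mgrid[-1:1:100j, -1:1:100j]            # regular target grid
zi = griddata(points, values, (xi, yi), method='linear')

plt.contourf(xi, yi, zi, 10)                       # 10 filled contour levels
plt.gca().set_aspect('equal')
plt.show()

With method='linear', grid points outside the convex hull of the electrode positions come back as NaN and are simply left blank by contourf; the full module interpolates over its whole ELECTRODES set, so the hull covers most of the head outline.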
def quick_sort(arr): q_sort(arr, 0, len(arr) - 1) def q_sort(arr, left, right): if left < right: pivot_index = partition(arr, left, right) q_sort(arr, left, pivot_index - 1) q_sort(arr, pivot_index + 1, right) <|reserved_special_token_0|> <|reserved_special_token_1|> def quick_sort(arr): q_sort(arr, 0, len(arr) - 1) def q_sort(arr, left, right): if left < right: pivot_index = partition(arr, left, right) q_sort(arr, left, pivot_index - 1) q_sort(arr, pivot_index + 1, right) def partition(arr, left, right): pivot = arr[left] while left < right: while left < right and arr[right] >= pivot: right -= 1 arr[left] = arr[right] while left < right and arr[left] <= pivot: left += 1 arr[right] = arr[left] arr[left] = pivot return left <|reserved_special_token_0|> <|reserved_special_token_1|> def quick_sort(arr): q_sort(arr, 0, len(arr) - 1) def q_sort(arr, left, right): if left < right: pivot_index = partition(arr, left, right) q_sort(arr, left, pivot_index - 1) q_sort(arr, pivot_index + 1, right) def partition(arr, left, right): pivot = arr[left] while left < right: while left < right and arr[right] >= pivot: right -= 1 arr[left] = arr[right] while left < right and arr[left] <= pivot: left += 1 arr[right] = arr[left] arr[left] = pivot return left def partition_1(arr, low, high): pivot = arr[high] store_index = low for i in range(low, high): if arr[i] < pivot: arr[store_index], arr[i] = arr[i], arr[store_index] store_index += 1 arr[store_index], arr[high] = arr[high], arr[store_index] return store_index <|reserved_special_token_0|> <|reserved_special_token_1|> def quick_sort(arr): q_sort(arr, 0, len(arr) - 1) def q_sort(arr, left, right): if left < right: pivot_index = partition(arr, left, right) q_sort(arr, left, pivot_index - 1) q_sort(arr, pivot_index + 1, right) def partition(arr, left, right): pivot = arr[left] while left < right: while left < right and arr[right] >= pivot: right -= 1 arr[left] = arr[right] while left < right and arr[left] <= pivot: left += 1 arr[right] = arr[left] arr[left] = pivot return left def partition_1(arr, low, high): pivot = arr[high] store_index = low for i in range(low, high): if arr[i] < pivot: arr[store_index], arr[i] = arr[i], arr[store_index] store_index += 1 arr[store_index], arr[high] = arr[high], arr[store_index] return store_index if __name__ == '__main__': arr = [5, 9, 1, 11, 6, 7, 2, 4] quick_sort(arr) print(arr) <|reserved_special_token_1|> def quick_sort(arr): q_sort(arr, 0, len(arr) - 1) def q_sort(arr, left, right): if left < right: pivot_index = partition(arr, left, right) q_sort(arr, left, pivot_index - 1) q_sort(arr, pivot_index + 1, right) def partition(arr, left, right): pivot = arr[left] while left < right: # 如果列表后边的数比基准数大或相等, 则前移一位直到有比基准数小的数出现 while left < right and arr[right] >= pivot: right -= 1 # 如找到, 则把第 right 个元素赋值给 left 位置,此时表中 left 和 right 的元素相等 arr[left] = arr[right] # # 减少下一个循环的一次比较 # if left < right: # left += 1 # 同样的方式比较前半区 while left < right and arr[left] <= pivot: left += 1 arr[right] = arr[left] # if left < right: # right -= 1 # 做完一轮比较之后, 列表被分成了两个半区, 并且 left=right , 需要将这个数设置回 pivot arr[left] = pivot return left def partition_1(arr, low, high): pivot = arr[high] store_index = low # 位置 store_index 存储较小元素 for i in range(low, high): # 当前元素小于或等于 pivot if arr[i] < pivot: arr[store_index], arr[i] = arr[i], arr[store_index] store_index += 1 arr[store_index], arr[high] = arr[high], arr[store_index] return store_index if __name__ == '__main__': # arr = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48] arr = [5, 9, 1, 11, 6, 7, 2, 4] 
quick_sort(arr) print(arr)
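The masked row above repeats the same quick sort in several progressively more complete versions. partition() is a Hoare-style scheme: the pivot is taken from the left end and the two indices are walked toward each other. partition_1() is the Lomuto scheme: the pivot is taken from the right end and a store_index sweeps forward. The Chinese comments in the final version translate roughly as: move the right index inward while elements are greater than or equal to the pivot, then copy that element into the left slot; scan the left index inward in the same way while elements are less than or equal to the pivot and copy into the right slot; once the two indices meet, the list is split into two halves and the meeting slot is set back to the pivot; in partition_1, store_index marks where the next element smaller than the pivot is stored. A small self-contained check of the Lomuto pass on the sample input used in the row (the helper name here is illustrative, not taken from the dataset):

# Illustrative sketch: one Lomuto-style partition pass on [5, 9, 1, 11, 6, 7, 2, 4].
def lomuto_partition(arr, low, high):
    pivot = arr[high]                  # pivot taken from the right end
    store = low                        # everything left of store is < pivot
    for i in range(low, high):
        if arr[i] < pivot:
            arr[store], arr[i] = arr[i], arr[store]
            store += 1
    arr[store], arr[high] = arr[high], arr[store]
    return store

data = [5, 9, 1, 11, 6, 7, 2, 4]
split = lomuto_partition(data, 0, len(data) - 1)
print(split, data)                     # 2 [1, 2, 4, 11, 6, 7, 9, 5]

After one pass the pivot 4 sits at its final index 2, with smaller elements to its left and larger ones to its right, which is exactly the invariant q_sort relies on when it recurses on the two halves.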
flexible
{ "blob_id": "09a5c96b7f496aca6b34d7f0a83d5b1e182ca409", "index": 1627, "step-1": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\n<mask token>\n", "step-2": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n while left < right:\n while left < right and arr[right] >= pivot:\n right -= 1\n arr[left] = arr[right]\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n arr[left] = pivot\n return left\n\n\n<mask token>\n", "step-3": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n while left < right:\n while left < right and arr[right] >= pivot:\n right -= 1\n arr[left] = arr[right]\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n arr[left] = pivot\n return left\n\n\ndef partition_1(arr, low, high):\n pivot = arr[high]\n store_index = low\n for i in range(low, high):\n if arr[i] < pivot:\n arr[store_index], arr[i] = arr[i], arr[store_index]\n store_index += 1\n arr[store_index], arr[high] = arr[high], arr[store_index]\n return store_index\n\n\n<mask token>\n", "step-4": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n while left < right:\n while left < right and arr[right] >= pivot:\n right -= 1\n arr[left] = arr[right]\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n arr[left] = pivot\n return left\n\n\ndef partition_1(arr, low, high):\n pivot = arr[high]\n store_index = low\n for i in range(low, high):\n if arr[i] < pivot:\n arr[store_index], arr[i] = arr[i], arr[store_index]\n store_index += 1\n arr[store_index], arr[high] = arr[high], arr[store_index]\n return store_index\n\n\nif __name__ == '__main__':\n arr = [5, 9, 1, 11, 6, 7, 2, 4]\n quick_sort(arr)\n print(arr)\n", "step-5": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n\n while left < right:\n # 如果列表后边的数比基准数大或相等, 则前移一位直到有比基准数小的数出现\n while left < right and arr[right] >= pivot:\n right -= 1\n # 如找到, 则把第 right 个元素赋值给 left 位置,此时表中 left 和 right 的元素相等\n arr[left] = arr[right]\n # # 减少下一个循环的一次比较\n # if left < right:\n # left += 1\n\n # 同样的方式比较前半区\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n # if left < right:\n # right -= 1\n\n # 做完一轮比较之后, 列表被分成了两个半区, 并且 left=right , 需要将这个数设置回 pivot\n arr[left] = pivot\n return left\n\n\ndef partition_1(arr, low, high):\n pivot = arr[high]\n store_index = low # 位置 store_index 存储较小元素\n\n for i in range(low, high):\n # 当前元素小于或等于 pivot\n if arr[i] < pivot:\n 
arr[store_index], arr[i] = arr[i], arr[store_index]\n store_index += 1\n arr[store_index], arr[high] = arr[high], arr[store_index]\n\n return store_index\n\n\nif __name__ == '__main__':\n # arr = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]\n arr = [5, 9, 1, 11, 6, 7, 2, 4]\n quick_sort(arr)\n print(arr)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> def change_label(): var.set(random.choice(ch)) <|reserved_special_token_0|> def slove(): expr.set(eval(expr.get())) <|reserved_special_token_0|> def clear(): expr.set('') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> var.set('Hello world I am a Label') <|reserved_special_token_0|> label.pack() def change_label(): var.set(random.choice(ch)) <|reserved_special_token_0|> b1.pack() <|reserved_special_token_0|> main_frame.pack() <|reserved_special_token_0|> button.pack() def slove(): expr.set(eval(expr.get())) <|reserved_special_token_0|> def clear(): expr.set('') <|reserved_special_token_0|> e1.pack() result_button.pack() clr_button.pack(anchor='sw') root.title('My Appliction') root.wm_minsize(400, 400) root.wm_maxsize(500, 500) root.geometry('+500+200') root.mainloop() <|reserved_special_token_1|> <|reserved_special_token_0|> root = tk.Tk() main_frame = tk.Frame(root) var = tk.StringVar() ch = ['hello world', 'HI Pyton', 'Mar Java', 'Mit Java', 'Lut Java'] var.set('Hello world I am a Label') label = tk.Label(main_frame, textvariable=var, bg='black', fg='white', font =('Times New Roman', 24, 'bold')) label.pack() def change_label(): var.set(random.choice(ch)) b1 = tk.Button(main_frame, text='click', command=change_label, font=( 'Arial', 15, 'bold'), bg='pink', fg='red') b1.pack() expr = tk.StringVar() e1 = tk.Entry(root, textvariable=expr, font=('Arial', 20, 'bold'), bg= 'gray', fg='white') main_frame.pack() button = tk.Button(root, text='!!EXIT!!', command=root.destroy, font=( 'Arial', 15, 'bold'), bg='pink', fg='red') button.pack() def slove(): expr.set(eval(expr.get())) result_button = tk.Button(root, text='!!Result!!', command=slove, font=( 'Arial', 15, 'bold'), bg='pink', fg='red') def clear(): expr.set('') clr_button = tk.Button(root, text='!!clear!!', command=clear, font=('Arial', 15, 'bold'), bg='pink', fg='red') e1.pack() result_button.pack() clr_button.pack(anchor='sw') root.title('My Appliction') root.wm_minsize(400, 400) root.wm_maxsize(500, 500) root.geometry('+500+200') root.mainloop() <|reserved_special_token_1|> import tkinter as tk import random root = tk.Tk() main_frame = tk.Frame(root) var = tk.StringVar() ch = ['hello world', 'HI Pyton', 'Mar Java', 'Mit Java', 'Lut Java'] var.set('Hello world I am a Label') label = tk.Label(main_frame, textvariable=var, bg='black', fg='white', font =('Times New Roman', 24, 'bold')) label.pack() def change_label(): var.set(random.choice(ch)) b1 = tk.Button(main_frame, text='click', command=change_label, font=( 'Arial', 15, 'bold'), bg='pink', fg='red') b1.pack() expr = tk.StringVar() e1 = tk.Entry(root, textvariable=expr, font=('Arial', 20, 'bold'), bg= 'gray', fg='white') main_frame.pack() button = tk.Button(root, text='!!EXIT!!', command=root.destroy, font=( 'Arial', 15, 'bold'), bg='pink', fg='red') button.pack() def slove(): expr.set(eval(expr.get())) result_button = tk.Button(root, text='!!Result!!', command=slove, font=( 'Arial', 15, 'bold'), bg='pink', fg='red') def clear(): expr.set('') clr_button = tk.Button(root, text='!!clear!!', command=clear, font=('Arial', 15, 'bold'), bg='pink', fg='red') e1.pack() result_button.pack() clr_button.pack(anchor='sw') root.title('My Appliction') root.wm_minsize(400, 400) root.wm_maxsize(500, 500) root.geometry('+500+200') root.mainloop() <|reserved_special_token_1|> import tkinter as tk import random root = tk.Tk() main_frame = tk.Frame(root) var = tk.StringVar() ch = [ "hello world" , "HI Pyton", "Mar Java", "Mit Java", "Lut 
Java" ] var.set("Hello world I am a Label") label = tk.Label(main_frame,textvariable=var, bg="black",fg="white",font=("Times New Roman",24,"bold")) label.pack() def change_label(): var.set(random.choice(ch)) b1 = tk.Button(main_frame,text="click",command=change_label, font=("Arial",15,'bold'),bg="pink",fg="red") b1.pack() expr = tk.StringVar() e1 = tk.Entry(root,textvariable=expr,font=("Arial",20,'bold'), bg='gray',fg='white') main_frame.pack() button = tk.Button(root,text="!!EXIT!!",command=root.destroy, font=("Arial",15,'bold'),bg="pink",fg="red") button.pack() def slove(): expr.set(eval(expr.get())) result_button= tk.Button(root,text="!!Result!!",command=slove, font=("Arial",15,'bold'),bg="pink",fg="red") def clear(): expr.set("") clr_button= tk.Button(root,text="!!clear!!",command=clear, font=("Arial",15,'bold'),bg="pink",fg="red") e1.pack() result_button.pack() clr_button.pack(anchor='sw') root.title("My Appliction") root.wm_minsize(400,400) root.wm_maxsize(500,500) root.geometry("+500+200") root.mainloop()
flexible
{ "blob_id": "33938a28aad29e996255827825a0cdb1db6b70b7", "index": 5842, "step-1": "<mask token>\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\n<mask token>\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\n<mask token>\n\n\ndef clear():\n expr.set('')\n\n\n<mask token>\n", "step-2": "<mask token>\nvar.set('Hello world I am a Label')\n<mask token>\nlabel.pack()\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\n<mask token>\nb1.pack()\n<mask token>\nmain_frame.pack()\n<mask token>\nbutton.pack()\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\n<mask token>\n\n\ndef clear():\n expr.set('')\n\n\n<mask token>\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title('My Appliction')\nroot.wm_minsize(400, 400)\nroot.wm_maxsize(500, 500)\nroot.geometry('+500+200')\nroot.mainloop()\n", "step-3": "<mask token>\nroot = tk.Tk()\nmain_frame = tk.Frame(root)\nvar = tk.StringVar()\nch = ['hello world', 'HI Pyton', 'Mar Java', 'Mit Java', 'Lut Java']\nvar.set('Hello world I am a Label')\nlabel = tk.Label(main_frame, textvariable=var, bg='black', fg='white', font\n =('Times New Roman', 24, 'bold'))\nlabel.pack()\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\nb1 = tk.Button(main_frame, text='click', command=change_label, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nb1.pack()\nexpr = tk.StringVar()\ne1 = tk.Entry(root, textvariable=expr, font=('Arial', 20, 'bold'), bg=\n 'gray', fg='white')\nmain_frame.pack()\nbutton = tk.Button(root, text='!!EXIT!!', command=root.destroy, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nbutton.pack()\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\nresult_button = tk.Button(root, text='!!Result!!', command=slove, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\n\n\ndef clear():\n expr.set('')\n\n\nclr_button = tk.Button(root, text='!!clear!!', command=clear, font=('Arial',\n 15, 'bold'), bg='pink', fg='red')\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title('My Appliction')\nroot.wm_minsize(400, 400)\nroot.wm_maxsize(500, 500)\nroot.geometry('+500+200')\nroot.mainloop()\n", "step-4": "import tkinter as tk\nimport random\nroot = tk.Tk()\nmain_frame = tk.Frame(root)\nvar = tk.StringVar()\nch = ['hello world', 'HI Pyton', 'Mar Java', 'Mit Java', 'Lut Java']\nvar.set('Hello world I am a Label')\nlabel = tk.Label(main_frame, textvariable=var, bg='black', fg='white', font\n =('Times New Roman', 24, 'bold'))\nlabel.pack()\n\n\ndef change_label():\n var.set(random.choice(ch))\n\n\nb1 = tk.Button(main_frame, text='click', command=change_label, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nb1.pack()\nexpr = tk.StringVar()\ne1 = tk.Entry(root, textvariable=expr, font=('Arial', 20, 'bold'), bg=\n 'gray', fg='white')\nmain_frame.pack()\nbutton = tk.Button(root, text='!!EXIT!!', command=root.destroy, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\nbutton.pack()\n\n\ndef slove():\n expr.set(eval(expr.get()))\n\n\nresult_button = tk.Button(root, text='!!Result!!', command=slove, font=(\n 'Arial', 15, 'bold'), bg='pink', fg='red')\n\n\ndef clear():\n expr.set('')\n\n\nclr_button = tk.Button(root, text='!!clear!!', command=clear, font=('Arial',\n 15, 'bold'), bg='pink', fg='red')\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title('My Appliction')\nroot.wm_minsize(400, 400)\nroot.wm_maxsize(500, 500)\nroot.geometry('+500+200')\nroot.mainloop()\n", "step-5": "import tkinter as tk \nimport random\nroot = tk.Tk()\nmain_frame = tk.Frame(root)\nvar = 
tk.StringVar()\nch = [ \"hello world\" , \"HI Pyton\", \"Mar Java\", \"Mit Java\", \"Lut Java\" ]\nvar.set(\"Hello world I am a Label\")\nlabel = tk.Label(main_frame,textvariable=var,\n bg=\"black\",fg=\"white\",font=(\"Times New Roman\",24,\"bold\"))\nlabel.pack()\ndef change_label():\n var.set(random.choice(ch))\nb1 = tk.Button(main_frame,text=\"click\",command=change_label,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\n\nb1.pack()\n\nexpr = tk.StringVar()\ne1 = tk.Entry(root,textvariable=expr,font=(\"Arial\",20,'bold'),\n bg='gray',fg='white')\n\nmain_frame.pack()\n\nbutton = tk.Button(root,text=\"!!EXIT!!\",command=root.destroy,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\nbutton.pack()\ndef slove():\n expr.set(eval(expr.get()))\nresult_button= tk.Button(root,text=\"!!Result!!\",command=slove,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\ndef clear():\n expr.set(\"\")\nclr_button= tk.Button(root,text=\"!!clear!!\",command=clear,\n font=(\"Arial\",15,'bold'),bg=\"pink\",fg=\"red\")\ne1.pack()\nresult_button.pack()\nclr_button.pack(anchor='sw')\nroot.title(\"My Appliction\")\nroot.wm_minsize(400,400)\nroot.wm_maxsize(500,500)\nroot.geometry(\"+500+200\")\nroot.mainloop()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
numero_uno=int(input("ingresa el primer numero ")) numero_dos=int(input("ingresa el segundo numero ")) print(numero_uno) print(numero_dos) total=numero_uno +numero_dos print("el total de la suma de : "+str(numero_uno)+" + "+str(numero_dos)+" es = a "+str(total))
normal
{ "blob_id": "5685befae923fc336a2a5e0eb5e382c2e7d82d04", "index": 9613, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(numero_uno)\nprint(numero_dos)\n<mask token>\nprint('el total de la suma de : ' + str(numero_uno) + ' + ' + str(\n numero_dos) + ' es = a ' + str(total))\n", "step-3": "numero_uno = int(input('ingresa el primer numero '))\nnumero_dos = int(input('ingresa el segundo numero '))\nprint(numero_uno)\nprint(numero_dos)\ntotal = numero_uno + numero_dos\nprint('el total de la suma de : ' + str(numero_uno) + ' + ' + str(\n numero_dos) + ' es = a ' + str(total))\n", "step-4": "numero_uno=int(input(\"ingresa el primer numero \"))\nnumero_dos=int(input(\"ingresa el segundo numero \"))\nprint(numero_uno)\nprint(numero_dos)\ntotal=numero_uno\t+numero_dos\nprint(\"el total de la suma de : \"+str(numero_uno)+\" + \"+str(numero_dos)+\" es = a \"+str(total))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> _base_ = '../model.py' model = dict(type='ImageClassifier', task='classification', pretrained=None, backbone=dict(), head=dict(in_channels=-1, loss=dict(type= 'CrossEntropyLoss', loss_weight=1.0), topk=(1, 5))) checkpoint_config = dict(type='CheckpointHookWithValResults') <|reserved_special_token_1|> _base_ = "../model.py" model = dict( type="ImageClassifier", task="classification", pretrained=None, backbone=dict(), head=dict(in_channels=-1, loss=dict(type="CrossEntropyLoss", loss_weight=1.0), topk=(1, 5)), ) checkpoint_config = dict(type="CheckpointHookWithValResults")
flexible
{ "blob_id": "8bd5eff12e68f7145676f5e089b51376a82ab489", "index": 3231, "step-1": "<mask token>\n", "step-2": "_base_ = '../model.py'\nmodel = dict(type='ImageClassifier', task='classification', pretrained=None,\n backbone=dict(), head=dict(in_channels=-1, loss=dict(type=\n 'CrossEntropyLoss', loss_weight=1.0), topk=(1, 5)))\ncheckpoint_config = dict(type='CheckpointHookWithValResults')\n", "step-3": "_base_ = \"../model.py\"\n\nmodel = dict(\n type=\"ImageClassifier\",\n task=\"classification\",\n pretrained=None,\n backbone=dict(),\n head=dict(in_channels=-1, loss=dict(type=\"CrossEntropyLoss\", loss_weight=1.0), topk=(1, 5)),\n)\n\ncheckpoint_config = dict(type=\"CheckpointHookWithValResults\")\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}} INSTALLED_APPS = ('django.contrib.auth', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.contenttypes', 'django.contrib.sites', 'maintenancemode') MIDDLEWARE = ('django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'maintenancemode.middleware.MaintenanceModeMiddleware') ROOT_URLCONF = 'maintenancemode.tests' SITE_ID = 1 MAINTENANCE_MODE = True MAINTENANCE_IGNORE_URLS = re.compile('^/ignored.*'), <|reserved_special_token_1|> import os, re DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}} INSTALLED_APPS = ('django.contrib.auth', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.contenttypes', 'django.contrib.sites', 'maintenancemode') MIDDLEWARE = ('django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'maintenancemode.middleware.MaintenanceModeMiddleware') ROOT_URLCONF = 'maintenancemode.tests' SITE_ID = 1 MAINTENANCE_MODE = True MAINTENANCE_IGNORE_URLS = re.compile('^/ignored.*'), <|reserved_special_token_1|> import os, re DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:' } } INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.admin', 'django.contrib.sessions', 'django.contrib.contenttypes', 'django.contrib.sites', 'maintenancemode', ) MIDDLEWARE = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'maintenancemode.middleware.MaintenanceModeMiddleware', ) ROOT_URLCONF = 'maintenancemode.tests' SITE_ID = 1 MAINTENANCE_MODE = True # or ``False`` and use ``maintenance`` command MAINTENANCE_IGNORE_URLS = ( re.compile(r'^/ignored.*'), )
flexible
{ "blob_id": "34ecf2bd9bc72a98aba4584880a198dd24899dbe", "index": 6218, "step-1": "<mask token>\n", "step-2": "<mask token>\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n ':memory:'}}\nINSTALLED_APPS = ('django.contrib.auth', 'django.contrib.admin',\n 'django.contrib.sessions', 'django.contrib.contenttypes',\n 'django.contrib.sites', 'maintenancemode')\nMIDDLEWARE = ('django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'maintenancemode.middleware.MaintenanceModeMiddleware')\nROOT_URLCONF = 'maintenancemode.tests'\nSITE_ID = 1\nMAINTENANCE_MODE = True\nMAINTENANCE_IGNORE_URLS = re.compile('^/ignored.*'),\n", "step-3": "import os, re\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n ':memory:'}}\nINSTALLED_APPS = ('django.contrib.auth', 'django.contrib.admin',\n 'django.contrib.sessions', 'django.contrib.contenttypes',\n 'django.contrib.sites', 'maintenancemode')\nMIDDLEWARE = ('django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'maintenancemode.middleware.MaintenanceModeMiddleware')\nROOT_URLCONF = 'maintenancemode.tests'\nSITE_ID = 1\nMAINTENANCE_MODE = True\nMAINTENANCE_IGNORE_URLS = re.compile('^/ignored.*'),\n", "step-4": "import os, re\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:'\n }\n}\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.admin',\n 'django.contrib.sessions',\n 'django.contrib.contenttypes',\n 'django.contrib.sites',\n\n 'maintenancemode',\n)\n\nMIDDLEWARE = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n\n 'maintenancemode.middleware.MaintenanceModeMiddleware',\n)\n\nROOT_URLCONF = 'maintenancemode.tests'\n\nSITE_ID = 1\n\nMAINTENANCE_MODE = True # or ``False`` and use ``maintenance`` command\nMAINTENANCE_IGNORE_URLS = (\n re.compile(r'^/ignored.*'),\n)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> def on_action(relay_option, number): """To turn on the chosen relay""" relay_option.on() print(f'relay {number} is turning on') <|reserved_special_token_0|> def toggle_action(relay_option, number): """To toggle the chosen relay""" print(f'relay {number} is toggling') relay_option.on() sleep(0.5) relay_option.off() sleep(0.5) def print_help(): """Print/show help for informations of the required parameter""" print( """ Description Arguments: number number of relay 1 to 8 action on, off, or toggle optional arguments: h show this help message and exit """ ) def options(): """Input the relay number or show help and check the input""" input_str = input('Which relay? ') while True: if input_str == 'h': print_help() return index = int(input_str) - 1 if 0 <= index <= 7: relay_status(RELAYS[index], input_str) relay_action(RELAYS[index], input_str) relay_status(RELAYS[index], input_str) return else: print('index out of range') return def relay_action(relay_number, num): """Do the given order(turn on, turn off, toggle) or raise error""" action = input('Which action? ') while True: try: return {'on': on_action, 'off': off_action, 'toggle': toggle_action }[action](relay_number, num) except KeyError: print('Try again') return relay_action(relay_number, num) def relay_status(relay_number, number): """Check initial relay's status""" if relay_number.value == 1: print(f'relay {number} is on') else: print(f'relay {number} is off') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def on_action(relay_option, number): """To turn on the chosen relay""" relay_option.on() print(f'relay {number} is turning on') def off_action(relay_option, number): """To turn off the chosen relay""" relay_option.off() print(f'relay {number} is turning off') def toggle_action(relay_option, number): """To toggle the chosen relay""" print(f'relay {number} is toggling') relay_option.on() sleep(0.5) relay_option.off() sleep(0.5) def print_help(): """Print/show help for informations of the required parameter""" print( """ Description Arguments: number number of relay 1 to 8 action on, off, or toggle optional arguments: h show this help message and exit """ ) def options(): """Input the relay number or show help and check the input""" input_str = input('Which relay? ') while True: if input_str == 'h': print_help() return index = int(input_str) - 1 if 0 <= index <= 7: relay_status(RELAYS[index], input_str) relay_action(RELAYS[index], input_str) relay_status(RELAYS[index], input_str) return else: print('index out of range') return def relay_action(relay_number, num): """Do the given order(turn on, turn off, toggle) or raise error""" action = input('Which action? 
') while True: try: return {'on': on_action, 'off': off_action, 'toggle': toggle_action }[action](relay_number, num) except KeyError: print('Try again') return relay_action(relay_number, num) def relay_status(relay_number, number): """Check initial relay's status""" if relay_number.value == 1: print(f'relay {number} is on') else: print(f'relay {number} is off') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> RELAYS = [LED(23), LED(24), LED(25), LED(8), LED(7), LED(1), LED(12), LED(16)] def on_action(relay_option, number): """To turn on the chosen relay""" relay_option.on() print(f'relay {number} is turning on') def off_action(relay_option, number): """To turn off the chosen relay""" relay_option.off() print(f'relay {number} is turning off') def toggle_action(relay_option, number): """To toggle the chosen relay""" print(f'relay {number} is toggling') relay_option.on() sleep(0.5) relay_option.off() sleep(0.5) def print_help(): """Print/show help for informations of the required parameter""" print( """ Description Arguments: number number of relay 1 to 8 action on, off, or toggle optional arguments: h show this help message and exit """ ) def options(): """Input the relay number or show help and check the input""" input_str = input('Which relay? ') while True: if input_str == 'h': print_help() return index = int(input_str) - 1 if 0 <= index <= 7: relay_status(RELAYS[index], input_str) relay_action(RELAYS[index], input_str) relay_status(RELAYS[index], input_str) return else: print('index out of range') return def relay_action(relay_number, num): """Do the given order(turn on, turn off, toggle) or raise error""" action = input('Which action? ') while True: try: return {'on': on_action, 'off': off_action, 'toggle': toggle_action }[action](relay_number, num) except KeyError: print('Try again') return relay_action(relay_number, num) def relay_status(relay_number, number): """Check initial relay's status""" if relay_number.value == 1: print(f'relay {number} is on') else: print(f'relay {number} is off') while True: options() sleep(1) <|reserved_special_token_1|> <|reserved_special_token_0|> from time import sleep from gpiozero import LED RELAYS = [LED(23), LED(24), LED(25), LED(8), LED(7), LED(1), LED(12), LED(16)] def on_action(relay_option, number): """To turn on the chosen relay""" relay_option.on() print(f'relay {number} is turning on') def off_action(relay_option, number): """To turn off the chosen relay""" relay_option.off() print(f'relay {number} is turning off') def toggle_action(relay_option, number): """To toggle the chosen relay""" print(f'relay {number} is toggling') relay_option.on() sleep(0.5) relay_option.off() sleep(0.5) def print_help(): """Print/show help for informations of the required parameter""" print( """ Description Arguments: number number of relay 1 to 8 action on, off, or toggle optional arguments: h show this help message and exit """ ) def options(): """Input the relay number or show help and check the input""" input_str = input('Which relay? ') while True: if input_str == 'h': print_help() return index = int(input_str) - 1 if 0 <= index <= 7: relay_status(RELAYS[index], input_str) relay_action(RELAYS[index], input_str) relay_status(RELAYS[index], input_str) return else: print('index out of range') return def relay_action(relay_number, num): """Do the given order(turn on, turn off, toggle) or raise error""" action = input('Which action? 
') while True: try: return {'on': on_action, 'off': off_action, 'toggle': toggle_action }[action](relay_number, num) except KeyError: print('Try again') return relay_action(relay_number, num) def relay_status(relay_number, number): """Check initial relay's status""" if relay_number.value == 1: print(f'relay {number} is on') else: print(f'relay {number} is off') while True: options() sleep(1) <|reserved_special_token_1|> '''Turning on or off, toggling and checking the status' of a specific relay''' #!/bin/env python3 from time import sleep from gpiozero import LED RELAYS = [ LED(23), LED(24), LED(25), LED(8), LED(7), LED(1), LED(12), LED(16) ] def on_action(relay_option, number): '''To turn on the chosen relay''' relay_option.on() print(f"relay {number} is turning on") def off_action(relay_option, number): '''To turn off the chosen relay''' relay_option.off() print(f"relay {number} is turning off") def toggle_action(relay_option, number): '''To toggle the chosen relay''' print(f"relay {number} is toggling") relay_option.on() sleep(0.5) relay_option.off() sleep(0.5) def print_help(): '''Print/show help for informations of the required parameter''' print(''' Description Arguments: number number of relay 1 to 8 action on, off, or toggle optional arguments: h show this help message and exit ''') def options(): '''Input the relay number or show help and check the input''' input_str = input("Which relay? ") while True: if input_str == 'h': print_help() return index = int(input_str) - 1 if 0 <= index <= 7: relay_status(RELAYS[index], input_str) relay_action(RELAYS[index], input_str) relay_status(RELAYS[index], input_str) return else: print("index out of range") return def relay_action(relay_number, num): '''Do the given order(turn on, turn off, toggle) or raise error''' action = input("Which action? ") while True: try: return { 'on': on_action, 'off': off_action, 'toggle': toggle_action }[action](relay_number, num) except KeyError: print("Try again") return relay_action(relay_number, num) def relay_status(relay_number, number): '''Check initial relay's status''' if relay_number.value == 1: print(f"relay {number} is on") else: print(f"relay {number} is off") while True: options() sleep(1)
flexible
{ "blob_id": "d82412055affc96d634957c953a35ea69b7e702f", "index": 403, "step-1": "<mask token>\n\n\ndef on_action(relay_option, number):\n \"\"\"To turn on the chosen relay\"\"\"\n relay_option.on()\n print(f'relay {number} is turning on')\n\n\n<mask token>\n\n\ndef toggle_action(relay_option, number):\n \"\"\"To toggle the chosen relay\"\"\"\n print(f'relay {number} is toggling')\n relay_option.on()\n sleep(0.5)\n relay_option.off()\n sleep(0.5)\n\n\ndef print_help():\n \"\"\"Print/show help for informations of the required parameter\"\"\"\n print(\n \"\"\"\nDescription\n\nArguments:\n number number of relay 1 to 8\n action on, off, or toggle\n\noptional arguments:\n h show this help message and exit\n \"\"\"\n )\n\n\ndef options():\n \"\"\"Input the relay number or show help and check the input\"\"\"\n input_str = input('Which relay? ')\n while True:\n if input_str == 'h':\n print_help()\n return\n index = int(input_str) - 1\n if 0 <= index <= 7:\n relay_status(RELAYS[index], input_str)\n relay_action(RELAYS[index], input_str)\n relay_status(RELAYS[index], input_str)\n return\n else:\n print('index out of range')\n return\n\n\ndef relay_action(relay_number, num):\n \"\"\"Do the given order(turn on, turn off, toggle) or raise error\"\"\"\n action = input('Which action? ')\n while True:\n try:\n return {'on': on_action, 'off': off_action, 'toggle': toggle_action\n }[action](relay_number, num)\n except KeyError:\n print('Try again')\n return relay_action(relay_number, num)\n\n\ndef relay_status(relay_number, number):\n \"\"\"Check initial relay's status\"\"\"\n if relay_number.value == 1:\n print(f'relay {number} is on')\n else:\n print(f'relay {number} is off')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef on_action(relay_option, number):\n \"\"\"To turn on the chosen relay\"\"\"\n relay_option.on()\n print(f'relay {number} is turning on')\n\n\ndef off_action(relay_option, number):\n \"\"\"To turn off the chosen relay\"\"\"\n relay_option.off()\n print(f'relay {number} is turning off')\n\n\ndef toggle_action(relay_option, number):\n \"\"\"To toggle the chosen relay\"\"\"\n print(f'relay {number} is toggling')\n relay_option.on()\n sleep(0.5)\n relay_option.off()\n sleep(0.5)\n\n\ndef print_help():\n \"\"\"Print/show help for informations of the required parameter\"\"\"\n print(\n \"\"\"\nDescription\n\nArguments:\n number number of relay 1 to 8\n action on, off, or toggle\n\noptional arguments:\n h show this help message and exit\n \"\"\"\n )\n\n\ndef options():\n \"\"\"Input the relay number or show help and check the input\"\"\"\n input_str = input('Which relay? ')\n while True:\n if input_str == 'h':\n print_help()\n return\n index = int(input_str) - 1\n if 0 <= index <= 7:\n relay_status(RELAYS[index], input_str)\n relay_action(RELAYS[index], input_str)\n relay_status(RELAYS[index], input_str)\n return\n else:\n print('index out of range')\n return\n\n\ndef relay_action(relay_number, num):\n \"\"\"Do the given order(turn on, turn off, toggle) or raise error\"\"\"\n action = input('Which action? 
')\n while True:\n try:\n return {'on': on_action, 'off': off_action, 'toggle': toggle_action\n }[action](relay_number, num)\n except KeyError:\n print('Try again')\n return relay_action(relay_number, num)\n\n\ndef relay_status(relay_number, number):\n \"\"\"Check initial relay's status\"\"\"\n if relay_number.value == 1:\n print(f'relay {number} is on')\n else:\n print(f'relay {number} is off')\n\n\n<mask token>\n", "step-3": "<mask token>\nRELAYS = [LED(23), LED(24), LED(25), LED(8), LED(7), LED(1), LED(12), LED(16)]\n\n\ndef on_action(relay_option, number):\n \"\"\"To turn on the chosen relay\"\"\"\n relay_option.on()\n print(f'relay {number} is turning on')\n\n\ndef off_action(relay_option, number):\n \"\"\"To turn off the chosen relay\"\"\"\n relay_option.off()\n print(f'relay {number} is turning off')\n\n\ndef toggle_action(relay_option, number):\n \"\"\"To toggle the chosen relay\"\"\"\n print(f'relay {number} is toggling')\n relay_option.on()\n sleep(0.5)\n relay_option.off()\n sleep(0.5)\n\n\ndef print_help():\n \"\"\"Print/show help for informations of the required parameter\"\"\"\n print(\n \"\"\"\nDescription\n\nArguments:\n number number of relay 1 to 8\n action on, off, or toggle\n\noptional arguments:\n h show this help message and exit\n \"\"\"\n )\n\n\ndef options():\n \"\"\"Input the relay number or show help and check the input\"\"\"\n input_str = input('Which relay? ')\n while True:\n if input_str == 'h':\n print_help()\n return\n index = int(input_str) - 1\n if 0 <= index <= 7:\n relay_status(RELAYS[index], input_str)\n relay_action(RELAYS[index], input_str)\n relay_status(RELAYS[index], input_str)\n return\n else:\n print('index out of range')\n return\n\n\ndef relay_action(relay_number, num):\n \"\"\"Do the given order(turn on, turn off, toggle) or raise error\"\"\"\n action = input('Which action? ')\n while True:\n try:\n return {'on': on_action, 'off': off_action, 'toggle': toggle_action\n }[action](relay_number, num)\n except KeyError:\n print('Try again')\n return relay_action(relay_number, num)\n\n\ndef relay_status(relay_number, number):\n \"\"\"Check initial relay's status\"\"\"\n if relay_number.value == 1:\n print(f'relay {number} is on')\n else:\n print(f'relay {number} is off')\n\n\nwhile True:\n options()\n sleep(1)\n", "step-4": "<mask token>\nfrom time import sleep\nfrom gpiozero import LED\nRELAYS = [LED(23), LED(24), LED(25), LED(8), LED(7), LED(1), LED(12), LED(16)]\n\n\ndef on_action(relay_option, number):\n \"\"\"To turn on the chosen relay\"\"\"\n relay_option.on()\n print(f'relay {number} is turning on')\n\n\ndef off_action(relay_option, number):\n \"\"\"To turn off the chosen relay\"\"\"\n relay_option.off()\n print(f'relay {number} is turning off')\n\n\ndef toggle_action(relay_option, number):\n \"\"\"To toggle the chosen relay\"\"\"\n print(f'relay {number} is toggling')\n relay_option.on()\n sleep(0.5)\n relay_option.off()\n sleep(0.5)\n\n\ndef print_help():\n \"\"\"Print/show help for informations of the required parameter\"\"\"\n print(\n \"\"\"\nDescription\n\nArguments:\n number number of relay 1 to 8\n action on, off, or toggle\n\noptional arguments:\n h show this help message and exit\n \"\"\"\n )\n\n\ndef options():\n \"\"\"Input the relay number or show help and check the input\"\"\"\n input_str = input('Which relay? 
')\n while True:\n if input_str == 'h':\n print_help()\n return\n index = int(input_str) - 1\n if 0 <= index <= 7:\n relay_status(RELAYS[index], input_str)\n relay_action(RELAYS[index], input_str)\n relay_status(RELAYS[index], input_str)\n return\n else:\n print('index out of range')\n return\n\n\ndef relay_action(relay_number, num):\n \"\"\"Do the given order(turn on, turn off, toggle) or raise error\"\"\"\n action = input('Which action? ')\n while True:\n try:\n return {'on': on_action, 'off': off_action, 'toggle': toggle_action\n }[action](relay_number, num)\n except KeyError:\n print('Try again')\n return relay_action(relay_number, num)\n\n\ndef relay_status(relay_number, number):\n \"\"\"Check initial relay's status\"\"\"\n if relay_number.value == 1:\n print(f'relay {number} is on')\n else:\n print(f'relay {number} is off')\n\n\nwhile True:\n options()\n sleep(1)\n", "step-5": "'''Turning on or off, toggling and checking the status' of a specific relay'''\n\n#!/bin/env python3\n\nfrom time import sleep\nfrom gpiozero import LED\n\nRELAYS = [\n LED(23),\n LED(24),\n LED(25),\n LED(8),\n LED(7),\n LED(1),\n LED(12),\n LED(16)\n]\n\n\ndef on_action(relay_option, number):\n '''To turn on the chosen relay'''\n relay_option.on()\n print(f\"relay {number} is turning on\")\n\n\ndef off_action(relay_option, number):\n '''To turn off the chosen relay'''\n relay_option.off()\n print(f\"relay {number} is turning off\")\n\n\ndef toggle_action(relay_option, number):\n '''To toggle the chosen relay'''\n print(f\"relay {number} is toggling\")\n relay_option.on()\n sleep(0.5)\n relay_option.off()\n sleep(0.5)\n\n\ndef print_help():\n '''Print/show help for informations of the required parameter'''\n print('''\nDescription\n\nArguments:\n number number of relay 1 to 8\n action on, off, or toggle\n\noptional arguments:\n h show this help message and exit\n ''')\n\n\ndef options():\n '''Input the relay number or show help and check the input'''\n input_str = input(\"Which relay? \")\n while True:\n if input_str == 'h':\n print_help()\n return\n\n index = int(input_str) - 1\n if 0 <= index <= 7:\n relay_status(RELAYS[index], input_str)\n relay_action(RELAYS[index], input_str)\n relay_status(RELAYS[index], input_str)\n return\n else:\n print(\"index out of range\")\n return\n\n\ndef relay_action(relay_number, num):\n '''Do the given order(turn on, turn off, toggle) or raise error'''\n action = input(\"Which action? \")\n while True:\n\n try:\n return {\n 'on': on_action,\n 'off': off_action,\n 'toggle': toggle_action\n }[action](relay_number, num)\n except KeyError:\n print(\"Try again\")\n return relay_action(relay_number, num)\n\n\ndef relay_status(relay_number, number):\n '''Check initial relay's status'''\n if relay_number.value == 1:\n print(f\"relay {number} is on\")\n else:\n print(f\"relay {number} is off\")\n\n\nwhile True:\n options()\n sleep(1)\n", "step-ids": [ 6, 7, 9, 10, 11 ] }
[ 6, 7, 9, 10, 11 ]
#GUIcal.py from tkinter import * from tkinter import ttk import math GUI=Tk() GUI.title('My Cal Program') GUI.geometry('500x500') def calc(): height=v_height.get() base=v_base.get()#ดึงค่ามาจากv_base print(f'height is {height}') print(f'Basal length is {base}') length= math.isqrt((height*height)+(base*base)) print('Lenght is {:.2f}'.format(length)) ###For attach picture ''' IMG=PhotoImage(file='pythagorus-theorem.png').subsample(3) IM1=Label(GUI,image=IMG) IM1.pack() ''' v_height=IntVar() v_base=IntVar() L1=Label(text='Please input height',foreground='red',font=('Angsana New',15)) L1.pack() E1=ttk.Entry(GUI,textvariable=v_height) E1.pack(pady=8,ipady=7,ipadx=17) L2=Label(text='Please input basal length',foreground='red',font=('Angsana New',15)) L2.pack() E2=ttk.Entry(GUI,textvariable=v_base) E2.pack(pady=8,ipady=7,ipadx=17) B1=ttk.Button(text='Calculate',command=calc) B1.pack() v_result=StringVar() v_result.set('----Result----') Result=ttk.Label(GUI,textvariable=v_result,foreground='green',font=('Angsana New',15)) Result.pack() GUI.mainloop()
normal
{ "blob_id": "77d7fb49ed4c3e78b148cd446e9a5c6a0e6fac8b", "index": 835, "step-1": "<mask token>\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\n", "step-2": "<mask token>\nGUI.title('My Cal Program')\nGUI.geometry('500x500')\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\nL1.pack()\n<mask token>\nE1.pack(pady=8, ipady=7, ipadx=17)\n<mask token>\nL2.pack()\n<mask token>\nE2.pack(pady=8, ipady=7, ipadx=17)\n<mask token>\nB1.pack()\n<mask token>\nv_result.set('----Result----')\n<mask token>\nResult.pack()\nGUI.mainloop()\n", "step-3": "<mask token>\nGUI = Tk()\nGUI.title('My Cal Program')\nGUI.geometry('500x500')\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\nv_height = IntVar()\nv_base = IntVar()\nL1 = Label(text='Please input height', foreground='red', font=(\n 'Angsana New', 15))\nL1.pack()\nE1 = ttk.Entry(GUI, textvariable=v_height)\nE1.pack(pady=8, ipady=7, ipadx=17)\nL2 = Label(text='Please input basal length', foreground='red', font=(\n 'Angsana New', 15))\nL2.pack()\nE2 = ttk.Entry(GUI, textvariable=v_base)\nE2.pack(pady=8, ipady=7, ipadx=17)\nB1 = ttk.Button(text='Calculate', command=calc)\nB1.pack()\nv_result = StringVar()\nv_result.set('----Result----')\nResult = ttk.Label(GUI, textvariable=v_result, foreground='green', font=(\n 'Angsana New', 15))\nResult.pack()\nGUI.mainloop()\n", "step-4": "from tkinter import *\nfrom tkinter import ttk\nimport math\nGUI = Tk()\nGUI.title('My Cal Program')\nGUI.geometry('500x500')\n\n\ndef calc():\n height = v_height.get()\n base = v_base.get()\n print(f'height is {height}')\n print(f'Basal length is {base}')\n length = math.isqrt(height * height + base * base)\n print('Lenght is {:.2f}'.format(length))\n\n\n<mask token>\nv_height = IntVar()\nv_base = IntVar()\nL1 = Label(text='Please input height', foreground='red', font=(\n 'Angsana New', 15))\nL1.pack()\nE1 = ttk.Entry(GUI, textvariable=v_height)\nE1.pack(pady=8, ipady=7, ipadx=17)\nL2 = Label(text='Please input basal length', foreground='red', font=(\n 'Angsana New', 15))\nL2.pack()\nE2 = ttk.Entry(GUI, textvariable=v_base)\nE2.pack(pady=8, ipady=7, ipadx=17)\nB1 = ttk.Button(text='Calculate', command=calc)\nB1.pack()\nv_result = StringVar()\nv_result.set('----Result----')\nResult = ttk.Label(GUI, textvariable=v_result, foreground='green', font=(\n 'Angsana New', 15))\nResult.pack()\nGUI.mainloop()\n", "step-5": "#GUIcal.py\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport math\r\n\r\nGUI=Tk()\r\nGUI.title('My Cal Program')\r\nGUI.geometry('500x500')\r\n\r\ndef calc():\r\n\theight=v_height.get()\r\n\tbase=v_base.get()#ดึงค่ามาจากv_base\r\n\tprint(f'height is {height}')\r\n\tprint(f'Basal length is {base}')\r\n\tlength= math.isqrt((height*height)+(base*base))\r\n\tprint('Lenght is {:.2f}'.format(length))\r\n\t\r\n###For attach 
picture\r\n'''\r\nIMG=PhotoImage(file='pythagorus-theorem.png').subsample(3)\r\nIM1=Label(GUI,image=IMG)\r\nIM1.pack()\r\n'''\r\nv_height=IntVar()\r\nv_base=IntVar()\r\n\r\nL1=Label(text='Please input height',foreground='red',font=('Angsana New',15))\r\nL1.pack()\r\nE1=ttk.Entry(GUI,textvariable=v_height)\r\nE1.pack(pady=8,ipady=7,ipadx=17)\r\n\r\n\r\nL2=Label(text='Please input basal length',foreground='red',font=('Angsana New',15))\r\nL2.pack()\r\nE2=ttk.Entry(GUI,textvariable=v_base)\r\nE2.pack(pady=8,ipady=7,ipadx=17)\r\n\r\n\r\nB1=ttk.Button(text='Calculate',command=calc)\r\nB1.pack()\r\n\r\nv_result=StringVar()\r\nv_result.set('----Result----')\r\nResult=ttk.Label(GUI,textvariable=v_result,foreground='green',font=('Angsana New',15))\r\nResult.pack()\r\n\r\nGUI.mainloop()\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# This is main file where we create the instances of Movie class # and run the file to view the movie website page # we have to import media where class Movie is defined and # fresh_tomatoes python files import fresh_tomatoes import media # Each instance has 8 arguments: Title, story line, poster image, # trailer url, rating, category, director, duration alien_covenant = media.Movie("Alien: Covenant", "The crew of a colony ship, " "bound for a remote planet, discover an " "uncharted paradise with a threat beyond" "their imagination," "and must attempt a harrowing escape.", "https://upload.wikimedia.org/wikipedia/en/3/33/" "Alien_Covenant_Teaser_Poster.jpg", "https://www.youtube.com/watch?v=H0VW6sg50Pk", "R", "Science fiction horror", "Ridley Scott", "123 Minutes") avatar = media.Movie("Avatar", "A marine on an alien planet", "http://upload.wikimedia.org/wikipedia/en/" "b/b0/Avatar-Teaser-Poster.jpg", "http://www.youtube.com/watch?v=5PSNL1qE6VY", "PG-13", "Epic science fiction", "James Cameron", "162 Minutes") okja = media.Movie("Okja", "A young girl named Mija risks everything to " "prevent a powerful, multi-national company " "from kidnapping her best friend," "a massive animal named Okja", "https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png", "https://www.youtube.com/watch?v=AjCebKn4iic", "R", "Action-Adventure", "Bong Joon-ho", "120 Minutes") gonegirl = media.Movie("Gone Girl", "A sad story", "http://upload.wikimedia.org/wikipedia/en/0/05/" "Gone_Girl_Poster.jpg", "http://www.youtube.com/watch?v=Ym3LB0lOJ0o", "R", "Crime", "David Fincher", "149 Minutes") avenger = media.Movie("Avenger", "A story about superheroes", "http://upload.wikimedia.org/wikipedia/en/3/37/" "Captain_America_The_First_Avenger_poster.jpg", "http://www.youtube.com/watch?v=hIR8Ar-Z4hw", "PG-13", "Action", "Joss Whedon", "143 Minutes") dark_knight = media.Movie("Dark knight rises", "A story about batman", "http://upload.wikimedia.org/wikipedia/en/8/83/" "Dark_knight_rises_poster.jpg", "http://www.youtube.com/watch?v=g8evyE9TuYk", "PG-13", "Action", "Christopher Nolan", "165 Minutes") # Creating a list of all instances movies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight] # Calling open_movies_page function to create fresh_tomatoes.html # file which contains a movie web page fresh_tomatoes.open_movies_page(movies)
normal
{ "blob_id": "9dfc8414628a8b09de3c24c504dd4163efdd3d35", "index": 6010, "step-1": "<mask token>\n", "step-2": "<mask token>\nfresh_tomatoes.open_movies_page(movies)\n", "step-3": "<mask token>\nalien_covenant = media.Movie('Alien: Covenant',\n 'The crew of a colony ship, bound for a remote planet, discover an uncharted paradise with a threat beyondtheir imagination,and must attempt a harrowing escape.'\n ,\n 'https://upload.wikimedia.org/wikipedia/en/3/33/Alien_Covenant_Teaser_Poster.jpg'\n , 'https://www.youtube.com/watch?v=H0VW6sg50Pk', 'R',\n 'Science fiction horror', 'Ridley Scott', '123 Minutes')\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n 'http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg',\n 'http://www.youtube.com/watch?v=5PSNL1qE6VY', 'PG-13',\n 'Epic science fiction', 'James Cameron', '162 Minutes')\nokja = media.Movie('Okja',\n 'A young girl named Mija risks everything to prevent a powerful, multi-national company from kidnapping her best friend,a massive animal named Okja'\n , 'https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png',\n 'https://www.youtube.com/watch?v=AjCebKn4iic', 'R', 'Action-Adventure',\n 'Bong Joon-ho', '120 Minutes')\ngonegirl = media.Movie('Gone Girl', 'A sad story',\n 'http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg',\n 'http://www.youtube.com/watch?v=Ym3LB0lOJ0o', 'R', 'Crime',\n 'David Fincher', '149 Minutes')\navenger = media.Movie('Avenger', 'A story about superheroes',\n 'http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg'\n , 'http://www.youtube.com/watch?v=hIR8Ar-Z4hw', 'PG-13', 'Action',\n 'Joss Whedon', '143 Minutes')\ndark_knight = media.Movie('Dark knight rises', 'A story about batman',\n 'http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg'\n , 'http://www.youtube.com/watch?v=g8evyE9TuYk', 'PG-13', 'Action',\n 'Christopher Nolan', '165 Minutes')\nmovies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]\nfresh_tomatoes.open_movies_page(movies)\n", "step-4": "import fresh_tomatoes\nimport media\nalien_covenant = media.Movie('Alien: Covenant',\n 'The crew of a colony ship, bound for a remote planet, discover an uncharted paradise with a threat beyondtheir imagination,and must attempt a harrowing escape.'\n ,\n 'https://upload.wikimedia.org/wikipedia/en/3/33/Alien_Covenant_Teaser_Poster.jpg'\n , 'https://www.youtube.com/watch?v=H0VW6sg50Pk', 'R',\n 'Science fiction horror', 'Ridley Scott', '123 Minutes')\navatar = media.Movie('Avatar', 'A marine on an alien planet',\n 'http://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg',\n 'http://www.youtube.com/watch?v=5PSNL1qE6VY', 'PG-13',\n 'Epic science fiction', 'James Cameron', '162 Minutes')\nokja = media.Movie('Okja',\n 'A young girl named Mija risks everything to prevent a powerful, multi-national company from kidnapping her best friend,a massive animal named Okja'\n , 'https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png',\n 'https://www.youtube.com/watch?v=AjCebKn4iic', 'R', 'Action-Adventure',\n 'Bong Joon-ho', '120 Minutes')\ngonegirl = media.Movie('Gone Girl', 'A sad story',\n 'http://upload.wikimedia.org/wikipedia/en/0/05/Gone_Girl_Poster.jpg',\n 'http://www.youtube.com/watch?v=Ym3LB0lOJ0o', 'R', 'Crime',\n 'David Fincher', '149 Minutes')\navenger = media.Movie('Avenger', 'A story about superheroes',\n 'http://upload.wikimedia.org/wikipedia/en/3/37/Captain_America_The_First_Avenger_poster.jpg'\n , 
'http://www.youtube.com/watch?v=hIR8Ar-Z4hw', 'PG-13', 'Action',\n 'Joss Whedon', '143 Minutes')\ndark_knight = media.Movie('Dark knight rises', 'A story about batman',\n 'http://upload.wikimedia.org/wikipedia/en/8/83/Dark_knight_rises_poster.jpg'\n , 'http://www.youtube.com/watch?v=g8evyE9TuYk', 'PG-13', 'Action',\n 'Christopher Nolan', '165 Minutes')\nmovies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]\nfresh_tomatoes.open_movies_page(movies)\n", "step-5": "# This is main file where we create the instances of Movie class\n# and run the file to view the movie website page\n\n# we have to import media where class Movie is defined and\n# fresh_tomatoes python files\nimport fresh_tomatoes\nimport media\n\n# Each instance has 8 arguments: Title, story line, poster image,\n# trailer url, rating, category, director, duration\nalien_covenant = media.Movie(\"Alien: Covenant\", \"The crew of a colony ship, \"\n \"bound for a remote planet, discover an \"\n \"uncharted paradise with a threat beyond\"\n \"their imagination,\"\n \"and must attempt a harrowing escape.\",\n \"https://upload.wikimedia.org/wikipedia/en/3/33/\"\n \"Alien_Covenant_Teaser_Poster.jpg\",\n \"https://www.youtube.com/watch?v=H0VW6sg50Pk\",\n \"R\",\n \"Science fiction horror\",\n \"Ridley Scott\",\n \"123 Minutes\")\n\navatar = media.Movie(\"Avatar\", \"A marine on an alien planet\",\n \"http://upload.wikimedia.org/wikipedia/en/\"\n \"b/b0/Avatar-Teaser-Poster.jpg\",\n \"http://www.youtube.com/watch?v=5PSNL1qE6VY\",\n \"PG-13\",\n \"Epic science fiction\",\n \"James Cameron\",\n \"162 Minutes\")\n\nokja = media.Movie(\"Okja\", \"A young girl named Mija risks everything to \"\n \"prevent a powerful, multi-national company \"\n \"from kidnapping her best friend,\"\n \"a massive animal named Okja\",\n \"https://upload.wikimedia.org/wikipedia/en/f/f6/Okja.png\",\n \"https://www.youtube.com/watch?v=AjCebKn4iic\",\n \"R\",\n \"Action-Adventure\",\n \"Bong Joon-ho\",\n \"120 Minutes\")\n\ngonegirl = media.Movie(\"Gone Girl\",\n \"A sad story\",\n \"http://upload.wikimedia.org/wikipedia/en/0/05/\"\n \"Gone_Girl_Poster.jpg\",\n \"http://www.youtube.com/watch?v=Ym3LB0lOJ0o\",\n \"R\",\n \"Crime\",\n \"David Fincher\",\n \"149 Minutes\")\n\navenger = media.Movie(\"Avenger\",\n \"A story about superheroes\",\n \"http://upload.wikimedia.org/wikipedia/en/3/37/\"\n \"Captain_America_The_First_Avenger_poster.jpg\",\n \"http://www.youtube.com/watch?v=hIR8Ar-Z4hw\",\n \"PG-13\",\n \"Action\",\n \"Joss Whedon\",\n \"143 Minutes\")\n\ndark_knight = media.Movie(\"Dark knight rises\",\n \"A story about batman\",\n \"http://upload.wikimedia.org/wikipedia/en/8/83/\"\n \"Dark_knight_rises_poster.jpg\",\n \"http://www.youtube.com/watch?v=g8evyE9TuYk\",\n \"PG-13\",\n \"Action\",\n \"Christopher Nolan\",\n \"165 Minutes\")\n\n\n# Creating a list of all instances\nmovies = [alien_covenant, avatar, okja, gonegirl, avenger, dark_knight]\n\n# Calling open_movies_page function to create fresh_tomatoes.html\n# file which contains a movie web page\nfresh_tomatoes.open_movies_page(movies)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from .factories import *
normal
{ "blob_id": "c036e6a0a9f06b08ee3eb43655dd833b46fd1e76", "index": 3690, "step-1": "<mask token>\n", "step-2": "from .factories import *\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> krait.mvc.set_init_ctrl(ws.WsPageController()) <|reserved_special_token_1|> import krait from ctrl import ws krait.mvc.set_init_ctrl(ws.WsPageController())
flexible
{ "blob_id": "da2b946238b429188fe3fa50286658d4b5cdbf41", "index": 5752, "step-1": "<mask token>\n", "step-2": "<mask token>\nkrait.mvc.set_init_ctrl(ws.WsPageController())\n", "step-3": "import krait\nfrom ctrl import ws\nkrait.mvc.set_init_ctrl(ws.WsPageController())\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
""" Package for haasplugin. """
normal
{ "blob_id": "20518302b6a67f8f1ac01f1adf4fe06ab2eaf280", "index": 3098, "step-1": "<mask token>\n", "step-2": "\"\"\"\nPackage for haasplugin.\n\"\"\"\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> class Netonix: <|reserved_special_token_0|> def _get(self, url, params=None, timeout=15, **kwargs): full_url = 'https://' + self.ip + self.url[url] return self.s.get(full_url, params=params, timeout=timeout, **kwargs) <|reserved_special_token_0|> @staticmethod def _merge_by_key(old, new, key='Number', append=True): for item in new: found = False for old_item in old: if key not in old_item: continue if old_item[key] != item[key]: continue old_item.update(item) found = True break if found is False: if append is True: old_item.append(new) else: raise LookupError() def open(self, ip, user, password): self.ip = ip self.s = requests.session() self.s.verify = False data = {} data['username'] = user data['password'] = password r = self._post('login', data) if 'Invalid username or password' in r.text: raise Exception('Invalid username or password') <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def restore(self, i): raise Exception('the restore method is still untested.') newFile = open(i, 'rb') data = '' for a in newFile: data += a newFile.close() r = self._post('restore', data) print(r.json()) if r.status_code != requests.codes.ok: raise Exception('Restore Request Failed') r = self._get('reboot') return r.json() <|reserved_special_token_0|> <|reserved_special_token_0|> def getStatus(self): if self.id == '': self.getID() r = self.s.get('https://' + self.ip + self.url['status'] + '?%s&_=%d' % (self.id, time.time())) if r.status_code != requests.codes.ok: raise Exception('Action failed') self.status = r.json() def update(self, i): data = '' with open(i, mode='rb') as file: data = file.read() r = self._post('update', data) if r.status_code != requests.codes.ok: raise Exception('Firmware Upload Failed') r = self._get('doupdate') if r.status_code != requests.codes.ok: raise Exception('Update Request Failed') def mergeConfig(self, config): self.orig_config = deepcopy(self.config) for k, v in config.items(): if k == 'Ports': self._merge_by_key(self.config[k], v, key='Number') continue if k == 'LACP': self._merge_by_key(self.config[k], v, key='Port') continue if k == 'VLANs': self._merge_by_key(self.config[k], v, key='ID') continue if type(v) is dict: continue if type(v) is list: self.config[k] += v continue self.config[k] = v def replaceConfig(self, config): self.orig_config = deepcopy(self.config) if 'Config_Version' in config: del config['Config_Version'] self.config.update(config) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Netonix: <|reserved_special_token_0|> def _get(self, url, params=None, timeout=15, **kwargs): full_url = 'https://' + self.ip + self.url[url] return self.s.get(full_url, params=params, timeout=timeout, **kwargs) <|reserved_special_token_0|> @staticmethod def _merge_by_key(old, new, key='Number', append=True): for item in new: found = False for old_item in old: if key not in old_item: continue if old_item[key] != item[key]: continue old_item.update(item) found = True break if found is False: if append is True: old_item.append(new) else: raise LookupError() def open(self, ip, user, password): self.ip = ip self.s = requests.session() self.s.verify = False data = {} data['username'] = user data['password'] = password r = self._post('login', data) if 'Invalid username or password' in r.text: raise Exception('Invalid username or password') def getConfig(self): r = self._get('config') result = r.json() if 'Config_Version' in result: self.config 
= result <|reserved_special_token_0|> def backup(self, output): r = self.s.get('https://' + self.ip + self.url['backup'] + '/' + self.ip) if r.status_code != requests.codes.ok: raise Exception('Backup Request Failed') newFile = open(output, 'wb') newFile.write(r.content) newFile.close() def restore(self, i): raise Exception('the restore method is still untested.') newFile = open(i, 'rb') data = '' for a in newFile: data += a newFile.close() r = self._post('restore', data) print(r.json()) if r.status_code != requests.codes.ok: raise Exception('Restore Request Failed') r = self._get('reboot') return r.json() <|reserved_special_token_0|> <|reserved_special_token_0|> def getStatus(self): if self.id == '': self.getID() r = self.s.get('https://' + self.ip + self.url['status'] + '?%s&_=%d' % (self.id, time.time())) if r.status_code != requests.codes.ok: raise Exception('Action failed') self.status = r.json() def update(self, i): data = '' with open(i, mode='rb') as file: data = file.read() r = self._post('update', data) if r.status_code != requests.codes.ok: raise Exception('Firmware Upload Failed') r = self._get('doupdate') if r.status_code != requests.codes.ok: raise Exception('Update Request Failed') def mergeConfig(self, config): self.orig_config = deepcopy(self.config) for k, v in config.items(): if k == 'Ports': self._merge_by_key(self.config[k], v, key='Number') continue if k == 'LACP': self._merge_by_key(self.config[k], v, key='Port') continue if k == 'VLANs': self._merge_by_key(self.config[k], v, key='ID') continue if type(v) is dict: continue if type(v) is list: self.config[k] += v continue self.config[k] = v def replaceConfig(self, config): self.orig_config = deepcopy(self.config) if 'Config_Version' in config: del config['Config_Version'] self.config.update(config) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Netonix: <|reserved_special_token_0|> def _get(self, url, params=None, timeout=15, **kwargs): full_url = 'https://' + self.ip + self.url[url] return self.s.get(full_url, params=params, timeout=timeout, **kwargs) <|reserved_special_token_0|> @staticmethod def _merge_by_key(old, new, key='Number', append=True): for item in new: found = False for old_item in old: if key not in old_item: continue if old_item[key] != item[key]: continue old_item.update(item) found = True break if found is False: if append is True: old_item.append(new) else: raise LookupError() def open(self, ip, user, password): self.ip = ip self.s = requests.session() self.s.verify = False data = {} data['username'] = user data['password'] = password r = self._post('login', data) if 'Invalid username or password' in r.text: raise Exception('Invalid username or password') def getConfig(self): r = self._get('config') result = r.json() if 'Config_Version' in result: self.config = result <|reserved_special_token_0|> def backup(self, output): r = self.s.get('https://' + self.ip + self.url['backup'] + '/' + self.ip) if r.status_code != requests.codes.ok: raise Exception('Backup Request Failed') newFile = open(output, 'wb') newFile.write(r.content) newFile.close() def restore(self, i): raise Exception('the restore method is still untested.') newFile = open(i, 'rb') data = '' for a in newFile: data += a newFile.close() r = self._post('restore', data) print(r.json()) if r.status_code != requests.codes.ok: raise Exception('Restore Request Failed') r = self._get('reboot') return r.json() <|reserved_special_token_0|> def getID(self): r = self._get('id', 
params={'_': time.time()}) if r.status_code != requests.codes.ok: raise Exception('Action failed') self.id = r.json()['BootID'] def getStatus(self): if self.id == '': self.getID() r = self.s.get('https://' + self.ip + self.url['status'] + '?%s&_=%d' % (self.id, time.time())) if r.status_code != requests.codes.ok: raise Exception('Action failed') self.status = r.json() def update(self, i): data = '' with open(i, mode='rb') as file: data = file.read() r = self._post('update', data) if r.status_code != requests.codes.ok: raise Exception('Firmware Upload Failed') r = self._get('doupdate') if r.status_code != requests.codes.ok: raise Exception('Update Request Failed') def mergeConfig(self, config): self.orig_config = deepcopy(self.config) for k, v in config.items(): if k == 'Ports': self._merge_by_key(self.config[k], v, key='Number') continue if k == 'LACP': self._merge_by_key(self.config[k], v, key='Port') continue if k == 'VLANs': self._merge_by_key(self.config[k], v, key='ID') continue if type(v) is dict: continue if type(v) is list: self.config[k] += v continue self.config[k] = v def replaceConfig(self, config): self.orig_config = deepcopy(self.config) if 'Config_Version' in config: del config['Config_Version'] self.config.update(config) def getDiff(self): if self.orig_config is None: return {} if DIFF is False: raise ImportError('Missing DeepDiff Module') return DeepDiff(self.orig_config, self.config, exclude_paths= "root['Config_Version']") <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Netonix: def __init__(self): self.ip = None self.s = None self.url = {} self.url['login'] = '/index.php' self.url['backup'] = '/api/v1/backup' self.url['config'] = '/api/v1/config' self.url['apply'] = '/api/v1/apply' self.url['confirm'] = '/api/v1/applystatus' self.url['reboot'] = '/api/v1/reboot' self.url['restore'] = '/api/v1/restore' self.url['mac'] = '/api/v1/mactable' self.url['status'] = '/api/v1/status/30sec' self.url['id'] = '/api/v1/bootid' self.url['update'] = '/api/v1/uploadfirmware' self.url['doupdate'] = '/api/v1/upgradefirmware' self.config = {} self.orig_config = None self.mac = {} self.status = {} self.id = '' def _get(self, url, params=None, timeout=15, **kwargs): full_url = 'https://' + self.ip + self.url[url] return self.s.get(full_url, params=params, timeout=timeout, **kwargs) <|reserved_special_token_0|> @staticmethod def _merge_by_key(old, new, key='Number', append=True): for item in new: found = False for old_item in old: if key not in old_item: continue if old_item[key] != item[key]: continue old_item.update(item) found = True break if found is False: if append is True: old_item.append(new) else: raise LookupError() def open(self, ip, user, password): self.ip = ip self.s = requests.session() self.s.verify = False data = {} data['username'] = user data['password'] = password r = self._post('login', data) if 'Invalid username or password' in r.text: raise Exception('Invalid username or password') def getConfig(self): r = self._get('config') result = r.json() if 'Config_Version' in result: self.config = result <|reserved_special_token_0|> def backup(self, output): r = self.s.get('https://' + self.ip + self.url['backup'] + '/' + self.ip) if r.status_code != requests.codes.ok: raise Exception('Backup Request Failed') newFile = open(output, 'wb') newFile.write(r.content) newFile.close() def restore(self, i): raise Exception('the restore method is still untested.') newFile = open(i, 'rb') data = '' for a in newFile: data += a newFile.close() r 
= self._post('restore', data) print(r.json()) if r.status_code != requests.codes.ok: raise Exception('Restore Request Failed') r = self._get('reboot') return r.json() <|reserved_special_token_0|> def getID(self): r = self._get('id', params={'_': time.time()}) if r.status_code != requests.codes.ok: raise Exception('Action failed') self.id = r.json()['BootID'] def getStatus(self): if self.id == '': self.getID() r = self.s.get('https://' + self.ip + self.url['status'] + '?%s&_=%d' % (self.id, time.time())) if r.status_code != requests.codes.ok: raise Exception('Action failed') self.status = r.json() def update(self, i): data = '' with open(i, mode='rb') as file: data = file.read() r = self._post('update', data) if r.status_code != requests.codes.ok: raise Exception('Firmware Upload Failed') r = self._get('doupdate') if r.status_code != requests.codes.ok: raise Exception('Update Request Failed') def mergeConfig(self, config): self.orig_config = deepcopy(self.config) for k, v in config.items(): if k == 'Ports': self._merge_by_key(self.config[k], v, key='Number') continue if k == 'LACP': self._merge_by_key(self.config[k], v, key='Port') continue if k == 'VLANs': self._merge_by_key(self.config[k], v, key='ID') continue if type(v) is dict: continue if type(v) is list: self.config[k] += v continue self.config[k] = v def replaceConfig(self, config): self.orig_config = deepcopy(self.config) if 'Config_Version' in config: del config['Config_Version'] self.config.update(config) def getDiff(self): if self.orig_config is None: return {} if DIFF is False: raise ImportError('Missing DeepDiff Module') return DeepDiff(self.orig_config, self.config, exclude_paths= "root['Config_Version']") <|reserved_special_token_0|> <|reserved_special_token_1|> #!/usr/bin/env python3 """ Python class to access Netonix® WISP Switch WebAPI ** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.** This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
For more information, please refer to <http://unlicense.org/> """ import requests from requests.exceptions import Timeout from copy import deepcopy import time import json try: from deepdiff import DeepDiff DIFF = True except: DIFF = False class Netonix(): def __init__(self): self.ip = None self.s = None self.url = {} self.url["login"] = "/index.php" self.url["backup"] = "/api/v1/backup" self.url["config"] = "/api/v1/config" self.url["apply"] = "/api/v1/apply" self.url["confirm"] = "/api/v1/applystatus" self.url["reboot"] = "/api/v1/reboot" self.url["restore"] = "/api/v1/restore" self.url["mac"] = "/api/v1/mactable" self.url["status"] = "/api/v1/status/30sec" self.url["id"] = "/api/v1/bootid" self.url["update"] = "/api/v1/uploadfirmware" self.url["doupdate"] = "/api/v1/upgradefirmware" self.config = {} self.orig_config = None self.mac = {} self.status = {} self.id = "" def _get(self, url, params=None, timeout=15, **kwargs): full_url = "https://"+self.ip+self.url[url] return self.s.get(full_url, params=params, timeout=timeout, **kwargs) def _post(self, url, data=None, json=None, timeout=15, **kwargs): full_url = "https://"+self.ip+self.url[url] return self.s.post( full_url, data=data, json=json, timeout=timeout, **kwargs ) @staticmethod def _merge_by_key(old, new, key="Number", append=True): for item in new: found = False for old_item in old: if(key not in old_item): continue if(old_item[key] != item[key]): continue old_item.update(item) found = True break if(found is False): if(append is True): old_item.append(new) else: raise LookupError() def open(self, ip, user, password): self.ip = ip self.s = requests.session() self.s.verify = False data = {} data["username"] = user data["password"] = password r = self._post("login", data) if("Invalid username or password" in r.text): raise Exception("Invalid username or password") def getConfig(self): r = self._get("config") result = r.json() if("Config_Version" in result): self.config = result def putConfig(self): r = self._post("config", json=self.config) try: r = self._post("apply") except Timeout: pass self.ip = self.config["IPv4_Address"] for a in range(5): try: r = self._post("confirm") except Timeout: continue break if(r.status_code != requests.codes.ok): raise Exception("Config Confirm Request Failed") # return r.json() def backup(self, output): r = self.s.get("https://"+self.ip+self.url["backup"]+"/"+self.ip) if(r.status_code != requests.codes.ok): raise Exception("Backup Request Failed") newFile = open(output, "wb") newFile.write(r.content) newFile.close() def restore(self, i): raise Exception("the restore method is still untested.") newFile = open(i, "rb") data = "" for a in newFile: data += a newFile.close() r = self._post("restore", data) print(r.json()) if(r.status_code != requests.codes.ok): raise Exception("Restore Request Failed") r = self._get("reboot") return r.json() def getMAC(self): r = self._get("mac", timeout=60) if(r.status_code != requests.codes.ok): raise Exception("Action failed") self.mac = r.json()["MACTable"] def getID(self): r = self._get("id", params={"_": time.time()}) if(r.status_code != requests.codes.ok): raise Exception("Action failed") self.id = r.json()["BootID"] def getStatus(self): if(self.id == ""): self.getID() r = self.s.get("https://"+self.ip+self.url["status"]+"?%s&_=%d" % (self.id, time.time())) if(r.status_code != requests.codes.ok): raise Exception("Action failed") self.status = r.json() def update(self, i): data = "" with open(i, mode='rb') as file: # b is important -> binary data = file.read() r = 
self._post("update", data) if(r.status_code != requests.codes.ok): raise Exception("Firmware Upload Failed") r = self._get("doupdate") if(r.status_code != requests.codes.ok): raise Exception("Update Request Failed") def mergeConfig(self, config): self.orig_config = deepcopy(self.config) for k, v in config.items(): if(k == "Ports"): self._merge_by_key(self.config[k], v, key="Number") continue if(k == "LACP"): self._merge_by_key(self.config[k], v, key="Port") continue if(k == "VLANs"): self._merge_by_key(self.config[k], v, key="ID") continue if(type(v) is dict): continue if(type(v) is list): self.config[k] += v continue self.config[k] = v def replaceConfig(self, config): self.orig_config = deepcopy(self.config) if("Config_Version" in config): del config["Config_Version"] self.config.update(config) def getDiff(self): if(self.orig_config is None): return {} if(DIFF is False): raise ImportError("Missing DeepDiff Module") return DeepDiff( self.orig_config, self.config, exclude_paths="root['Config_Version']" ) if __name__ == '__main__': import getpass import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) ip = str(input("switch ip:")) user = str(input("user:")) pw = getpass.getpass("password:") n = Netonix() n.open(ip, user, pw) n.getMAC() print(json.dumps(n.mac, indent=4)) n.getMAC() print(json.dumps(n.mac, indent=4))
flexible
{ "blob_id": "743d261052e4532c1304647501719ad897224b4e", "index": 8991, "step-1": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n <mask token>\n <mask token>\n <mask token>\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n <mask token>\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise 
Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n <mask token>\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Netonix:\n <mask token>\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a 
in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n\n def getID(self):\n r = self._get('id', params={'_': time.time()})\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.id = r.json()['BootID']\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n\n def getDiff(self):\n if self.orig_config is None:\n return {}\n if DIFF is False:\n raise ImportError('Missing DeepDiff Module')\n return DeepDiff(self.orig_config, self.config, exclude_paths=\n \"root['Config_Version']\")\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Netonix:\n\n def __init__(self):\n self.ip = None\n self.s = None\n self.url = {}\n self.url['login'] = '/index.php'\n self.url['backup'] = '/api/v1/backup'\n self.url['config'] = '/api/v1/config'\n self.url['apply'] = '/api/v1/apply'\n self.url['confirm'] = '/api/v1/applystatus'\n self.url['reboot'] = '/api/v1/reboot'\n self.url['restore'] = '/api/v1/restore'\n self.url['mac'] = '/api/v1/mactable'\n self.url['status'] = '/api/v1/status/30sec'\n self.url['id'] = '/api/v1/bootid'\n self.url['update'] = '/api/v1/uploadfirmware'\n self.url['doupdate'] = '/api/v1/upgradefirmware'\n self.config = {}\n self.orig_config = None\n self.mac = {}\n self.status = {}\n self.id = ''\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = 'https://' + self.ip + self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n <mask token>\n\n @staticmethod\n def _merge_by_key(old, new, key='Number', append=True):\n for item in new:\n found = False\n for old_item in old:\n if key not in old_item:\n continue\n if old_item[key] != item[key]:\n continue\n old_item.update(item)\n found = True\n break\n if found is False:\n if append is True:\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data['username'] = user\n data['password'] = password\n r = self._post('login', data)\n if 'Invalid username or password' in r.text:\n raise Exception('Invalid username or password')\n\n def getConfig(self):\n r = self._get('config')\n result = r.json()\n if 'Config_Version' in 
result:\n self.config = result\n <mask token>\n\n def backup(self, output):\n r = self.s.get('https://' + self.ip + self.url['backup'] + '/' +\n self.ip)\n if r.status_code != requests.codes.ok:\n raise Exception('Backup Request Failed')\n newFile = open(output, 'wb')\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception('the restore method is still untested.')\n newFile = open(i, 'rb')\n data = ''\n for a in newFile:\n data += a\n newFile.close()\n r = self._post('restore', data)\n print(r.json())\n if r.status_code != requests.codes.ok:\n raise Exception('Restore Request Failed')\n r = self._get('reboot')\n return r.json()\n <mask token>\n\n def getID(self):\n r = self._get('id', params={'_': time.time()})\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.id = r.json()['BootID']\n\n def getStatus(self):\n if self.id == '':\n self.getID()\n r = self.s.get('https://' + self.ip + self.url['status'] + \n '?%s&_=%d' % (self.id, time.time()))\n if r.status_code != requests.codes.ok:\n raise Exception('Action failed')\n self.status = r.json()\n\n def update(self, i):\n data = ''\n with open(i, mode='rb') as file:\n data = file.read()\n r = self._post('update', data)\n if r.status_code != requests.codes.ok:\n raise Exception('Firmware Upload Failed')\n r = self._get('doupdate')\n if r.status_code != requests.codes.ok:\n raise Exception('Update Request Failed')\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n for k, v in config.items():\n if k == 'Ports':\n self._merge_by_key(self.config[k], v, key='Number')\n continue\n if k == 'LACP':\n self._merge_by_key(self.config[k], v, key='Port')\n continue\n if k == 'VLANs':\n self._merge_by_key(self.config[k], v, key='ID')\n continue\n if type(v) is dict:\n continue\n if type(v) is list:\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n if 'Config_Version' in config:\n del config['Config_Version']\n self.config.update(config)\n\n def getDiff(self):\n if self.orig_config is None:\n return {}\n if DIFF is False:\n raise ImportError('Missing DeepDiff Module')\n return DeepDiff(self.orig_config, self.config, exclude_paths=\n \"root['Config_Version']\")\n\n\n<mask token>\n", "step-5": "#!/usr/bin/env python3\n\"\"\"\nPython class to access Netonix® WISP Switch WebAPI\n\n** NEITHER THIS CODE NOR THE AUTHOR IS ASSOCIATED WITH NETONIX® IN ANY WAY.**\n\nThis is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. 
We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to <http://unlicense.org/>\n\"\"\"\n\nimport requests\nfrom requests.exceptions import Timeout\nfrom copy import deepcopy\nimport time\nimport json\ntry:\n from deepdiff import DeepDiff\n DIFF = True\nexcept:\n DIFF = False\n\nclass Netonix():\n def __init__(self):\n self.ip = None\n self.s = None\n self.url = {}\n self.url[\"login\"] = \"/index.php\"\n self.url[\"backup\"] = \"/api/v1/backup\"\n self.url[\"config\"] = \"/api/v1/config\"\n self.url[\"apply\"] = \"/api/v1/apply\"\n self.url[\"confirm\"] = \"/api/v1/applystatus\"\n self.url[\"reboot\"] = \"/api/v1/reboot\"\n self.url[\"restore\"] = \"/api/v1/restore\"\n self.url[\"mac\"] = \"/api/v1/mactable\"\n self.url[\"status\"] = \"/api/v1/status/30sec\"\n self.url[\"id\"] = \"/api/v1/bootid\"\n self.url[\"update\"] = \"/api/v1/uploadfirmware\"\n self.url[\"doupdate\"] = \"/api/v1/upgradefirmware\"\n self.config = {}\n self.orig_config = None\n self.mac = {}\n self.status = {}\n self.id = \"\"\n\n def _get(self, url, params=None, timeout=15, **kwargs):\n full_url = \"https://\"+self.ip+self.url[url]\n return self.s.get(full_url, params=params, timeout=timeout, **kwargs)\n\n def _post(self, url, data=None, json=None, timeout=15, **kwargs):\n full_url = \"https://\"+self.ip+self.url[url]\n return self.s.post(\n full_url,\n data=data,\n json=json,\n timeout=timeout,\n **kwargs\n )\n\n @staticmethod\n def _merge_by_key(old, new, key=\"Number\", append=True):\n for item in new:\n found = False\n for old_item in old:\n if(key not in old_item):\n continue\n if(old_item[key] != item[key]):\n continue\n old_item.update(item)\n found = True\n break\n if(found is False):\n if(append is True):\n old_item.append(new)\n else:\n raise LookupError()\n\n def open(self, ip, user, password):\n self.ip = ip\n self.s = requests.session()\n self.s.verify = False\n data = {}\n data[\"username\"] = user\n data[\"password\"] = password\n r = self._post(\"login\", data)\n if(\"Invalid username or password\" in r.text):\n raise Exception(\"Invalid username or password\")\n\n def getConfig(self):\n r = self._get(\"config\")\n result = r.json()\n if(\"Config_Version\" in result):\n self.config = result\n\n def putConfig(self):\n r = self._post(\"config\", json=self.config)\n try:\n r = self._post(\"apply\")\n except Timeout:\n pass\n self.ip = self.config[\"IPv4_Address\"]\n for a in range(5):\n try:\n r = self._post(\"confirm\")\n except Timeout:\n continue\n break\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Config Confirm Request Failed\")\n # return r.json()\n\n def backup(self, output):\n r = self.s.get(\"https://\"+self.ip+self.url[\"backup\"]+\"/\"+self.ip)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Backup Request Failed\")\n newFile = open(output, \"wb\")\n newFile.write(r.content)\n newFile.close()\n\n def restore(self, i):\n raise Exception(\"the restore method is still untested.\")\n newFile 
= open(i, \"rb\")\n data = \"\"\n for a in newFile:\n data += a\n newFile.close()\n r = self._post(\"restore\", data)\n print(r.json())\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Restore Request Failed\")\n r = self._get(\"reboot\")\n return r.json()\n\n def getMAC(self):\n r = self._get(\"mac\", timeout=60)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.mac = r.json()[\"MACTable\"]\n\n def getID(self):\n r = self._get(\"id\", params={\"_\": time.time()})\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.id = r.json()[\"BootID\"]\n\n def getStatus(self):\n if(self.id == \"\"):\n self.getID()\n r = self.s.get(\"https://\"+self.ip+self.url[\"status\"]+\"?%s&_=%d\" % (self.id, time.time()))\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Action failed\")\n self.status = r.json()\n\n def update(self, i):\n data = \"\"\n with open(i, mode='rb') as file: # b is important -> binary\n data = file.read()\n r = self._post(\"update\", data)\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Firmware Upload Failed\")\n r = self._get(\"doupdate\")\n if(r.status_code != requests.codes.ok):\n raise Exception(\"Update Request Failed\")\n\n def mergeConfig(self, config):\n self.orig_config = deepcopy(self.config)\n\n for k, v in config.items():\n if(k == \"Ports\"):\n self._merge_by_key(self.config[k], v, key=\"Number\")\n continue\n if(k == \"LACP\"):\n self._merge_by_key(self.config[k], v, key=\"Port\")\n continue\n if(k == \"VLANs\"):\n self._merge_by_key(self.config[k], v, key=\"ID\")\n continue\n if(type(v) is dict):\n continue\n if(type(v) is list):\n self.config[k] += v\n continue\n self.config[k] = v\n\n def replaceConfig(self, config):\n self.orig_config = deepcopy(self.config)\n\n if(\"Config_Version\" in config):\n del config[\"Config_Version\"]\n self.config.update(config)\n\n def getDiff(self):\n if(self.orig_config is None):\n return {}\n if(DIFF is False):\n raise ImportError(\"Missing DeepDiff Module\")\n return DeepDiff(\n self.orig_config,\n self.config,\n exclude_paths=\"root['Config_Version']\"\n )\n\n\nif __name__ == '__main__':\n import getpass\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n ip = str(input(\"switch ip:\"))\n user = str(input(\"user:\"))\n pw = getpass.getpass(\"password:\")\n n = Netonix()\n n.open(ip, user, pw)\n n.getMAC()\n print(json.dumps(n.mac, indent=4))\n n.getMAC()\n print(json.dumps(n.mac, indent=4))\n", "step-ids": [ 9, 11, 13, 14, 20 ] }
[ 9, 11, 13, 14, 20 ]
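# A minimal usage sketch for the Netonix class defined above, assuming a
# reachable switch and valid credentials; the address, login and the example
# port fields below are placeholders rather than values from the original code.
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

n = Netonix()
n.open('192.0.2.10', 'admin', 'secret')
n.getConfig()
n.mergeConfig({'Ports': [{'Number': 1, 'Name': 'uplink'}]})
print(n.getDiff())   # raises ImportError if the optional deepdiff module is missing
n.putConfig()        # pushes, applies and confirms the merged configuration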
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-11 03:58
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('produksi', '0055_auto_20190409_1316'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='transisi',
            name='status_perpindahan',
        ),
    ]
normal
{ "blob_id": "1eb5df463bbd39002c5dbc3f88459e2f26d4b465", "index": 8505, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('produksi', '0055_auto_20190409_1316')]\n operations = [migrations.RemoveField(model_name='transisi', name=\n 'status_perpindahan')]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('produksi', '0055_auto_20190409_1316')]\n operations = [migrations.RemoveField(model_name='transisi', name=\n 'status_perpindahan')]\n", "step-5": "# -*- coding: utf-8 -*-\r\n# Generated by Django 1.11.20 on 2019-04-11 03:58\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('produksi', '0055_auto_20190409_1316'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='transisi',\r\n name='status_perpindahan',\r\n ),\r\n ]\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
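# A minimal sketch of applying this app's pending migrations from Python; the
# usual route is simply "python manage.py migrate produksi" from the shell.
# DJANGO_SETTINGS_MODULE is assumed to already point at the project settings.
import django
from django.core.management import call_command

django.setup()
call_command('migrate', 'produksi')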
#@@---------------------------@@
# Author: Chamil Jayasundara
# Date: 5/18/17
# Description: Extract SFLOW data from sflow logs
#@@---------------------------@@

import itertools
from collections import defaultdict

"""Flow Sample and Datagram Objects"""


class Container(object):

    def __init__(self, id):
        self.id = id
        self.content = defaultdict(int)

    def __getitem__(self, key):
        return self.content[key]

    def __setitem__(self, key, value):
        self.content[key] = value


class Datagram(Container):

    # itertools.count().next is Python 2 only; Python 3 would use next()/__next__
    datagram_counter = itertools.count().next

    def __init__(self):
        super(Datagram, self).__init__(Datagram.datagram_counter())
        self['flowSamples'] = {}


class FlowSample(Container):

    flowsample_counter = itertools.count().next

    def __init__(self):
        super(FlowSample, self).__init__(FlowSample.flowsample_counter())


#############################
"""Data Extraction"""


def process_line_and_store_in_obj(line, obj):
    partition = line.partition(" ")
    obj[partition[0]] = partition[2].rstrip()


### State Machine Classes
class WithinDatagram(object):

    def __init__(self, traceObj):
        self.Trace = traceObj
        self.current_datagram = None

    def process(self, line):
        if "startDatagram" in line:
            self.current_datagram = Datagram()

        elif "endDatagram" in line:
            self.Trace.callable(self.current_datagram.content)

        elif "startSample" in line:
            self.Trace.currentState = self.Trace.within_flowsample
            self.Trace.within_flowsample.re_init(FlowSample(), self.current_datagram)

        else:
            process_line_and_store_in_obj(line, self.current_datagram)


class WithinFlowsample(object):

    def __init__(self, traceObj):
        self.Trace = traceObj
        self.current_datagram = None
        self.current_flowsample = None

    def re_init(self, flowsampleObj, datagramObj):
        self.current_datagram = datagramObj
        self.current_flowsample = flowsampleObj

    def process(self, line):
        if "endSample" in line:
            self.current_datagram['flowSamples'][self.current_flowsample.id] = self.current_flowsample.content
            self.Trace.currentState = self.Trace.within_datagram

        else:
            process_line_and_store_in_obj(line, self.current_flowsample)


class Trace(object):

    def __init__(self, callable=None):
        self.within_datagram = WithinDatagram(self)
        self.within_flowsample = WithinFlowsample(self)
        self.currentState = self.within_datagram
        self.callable = callable

    def process(self, line):
        self.currentState.process(line)
normal
{ "blob_id": "395ff2e7c052b57548151fc71fad971c94ebceea", "index": 3974, "step-1": "<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n <mask token>\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n", "step-2": "<mask token>\n\n\nclass FlowSample(Container):\n <mask token>\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self, line):\n if 'startDatagram' in line:\n self.current_datagram = Datagram()\n elif 'endDatagram' in line:\n self.Trace.callable(self.current_datagram.content)\n elif 'startSample' in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.\n current_datagram)\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n", "step-3": "<mask token>\n\n\nclass Datagram(Container):\n <mask token>\n <mask token>\n\n\nclass FlowSample(Container):\n flowsample_counter = itertools.count().next\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self, line):\n if 'startDatagram' in line:\n self.current_datagram = Datagram()\n elif 'endDatagram' in line:\n self.Trace.callable(self.current_datagram.content)\n elif 'startSample' in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.\n current_datagram)\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass 
WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n", "step-4": "<mask token>\n\n\nclass Datagram(Container):\n datagram_counter = itertools.count().next\n\n def __init__(self):\n super(Datagram, self).__init__(Datagram.datagram_counter())\n self['flowSamples'] = {}\n\n\nclass FlowSample(Container):\n flowsample_counter = itertools.count().next\n\n def __init__(self):\n super(FlowSample, self).__init__(FlowSample.flowsample_counter())\n\n\n<mask token>\n\n\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self, line):\n if 'startDatagram' in line:\n self.current_datagram = Datagram()\n elif 'endDatagram' in line:\n self.Trace.callable(self.current_datagram.content)\n elif 'startSample' in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.\n current_datagram)\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self, line):\n if 'endSample' in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id\n ] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n", "step-5": "#@@---------------------------@@\n# Author: Chamil Jayasundara\n# Date: 5/18/17\n# Description: Extract SFLOW data from slow logs\n#@@---------------------------@@\n\nimport itertools\nfrom collections import defaultdict\n\n\"\"\"Flow Sample and Datagram Objects\"\"\"\n\n\nclass Container(object):\n\n def __init__(self, id):\n self.id = id\n self.content = defaultdict(int)\n\n def __getitem__(self, key):\n return self.content[key]\n\n def __setitem__(self, key, value):\n self.content[key] = value\n\n\nclass Datagram(Container):\n\n datagram_counter = itertools.count().next\n\n def __init__(self):\n super(Datagram, self).__init__(Datagram.datagram_counter())\n self['flowSamples'] = {}\n\n\nclass FlowSample(Container):\n\n flowsample_counter = itertools.count().next\n\n def __init__(self):\n super(FlowSample, 
self).__init__(FlowSample.flowsample_counter())\n\n\n#############################\n\"\"\"Data Extraction\"\"\"\n\ndef process_line_and_store_in_obj(line, obj):\n partition = line.partition(\" \")\n obj[partition[0]] = partition[2].rstrip()\n\n\n###State Machine Classses\nclass WithinDatagram(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n\n def process(self,line):\n if \"startDatagram\" in line:\n self.current_datagram = Datagram()\n\n elif \"endDatagram\" in line:\n self.Trace.callable(self.current_datagram.content)\n\n elif \"startSample\" in line:\n self.Trace.currentState = self.Trace.within_flowsample\n self.Trace.within_flowsample.re_init(FlowSample(), self.current_datagram)\n\n else:\n process_line_and_store_in_obj(line, self.current_datagram)\n\n\nclass WithinFlowsample(object):\n\n def __init__(self, traceObj):\n self.Trace = traceObj\n self.current_datagram = None\n self.current_flowsample = None\n\n def re_init(self, flowsampleObj, datagramObj):\n self.current_datagram = datagramObj\n self.current_flowsample = flowsampleObj\n\n def process(self,line):\n if \"endSample\" in line:\n self.current_datagram['flowSamples'][self.current_flowsample.id] = self.current_flowsample.content\n self.Trace.currentState = self.Trace.within_datagram\n\n else:\n process_line_and_store_in_obj(line, self.current_flowsample)\n\n\nclass Trace(object):\n\n def __init__(self, callable=None):\n self.within_datagram = WithinDatagram(self)\n self.within_flowsample = WithinFlowsample(self)\n self.currentState = self.within_datagram\n self.callable = callable\n\n def process(self, line):\n self.currentState.process(line)\n\n", "step-ids": [ 9, 12, 14, 16, 23 ] }
[ 9, 12, 14, 16, 23 ]
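# A minimal usage sketch for the Trace state machine above: feed it lines of
# sFlow log output in the startDatagram/startSample key-value format it expects.
# The file name and the callback body are assumptions; note the module above is
# written in Python 2 style (itertools.count().next).
def handle_datagram(content):
    print("%s flow samples: %d" % (content['agent'], len(content['flowSamples'])))

trace = Trace(callable=handle_datagram)
with open('sflow.log') as log:
    for line in log:
        trace.process(line)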
<|reserved_special_token_0|> class GenericPower(Entity): <|reserved_special_token_0|> def __init__(self, unique_id, entity_type=EntityType.find(100), name= 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0 ): Entity.__init__(self, unique_id, entity_type, name=name, state= state, state_value=state_value, last_checkin=last_checkin) def state_changed(self, state_message): Entity.state_changed(self, state_message) state = state_message[0] if state == 0: if 0 != self.state_value: self.set_state(STATE_OFF, 0) return True elif state == 1: if 1 != self.state_value: self.set_state(STATE_ON, 1) return True return False def control(self, controller, command, value=None): if command.id == COMMAND_ON.id: controller.send_message(self.unique_id, [chr(0), chr(1)]) self.log_command('Turning the power on') return elif command.id == COMMAND_OFF.id: controller.send_message(self.unique_id, [chr(0), chr(0)]) self.log_command('Turning the power off') return Entity.control(self, command, value=value) def describe_state(self): return str(self.state) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class GenericPower(Entity): """ This type of entites are able to report their states as logical on (0x01) or off (0x00) state, and accept commands to switch this state. """ def __init__(self, unique_id, entity_type=EntityType.find(100), name= 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0 ): Entity.__init__(self, unique_id, entity_type, name=name, state= state, state_value=state_value, last_checkin=last_checkin) def state_changed(self, state_message): Entity.state_changed(self, state_message) state = state_message[0] if state == 0: if 0 != self.state_value: self.set_state(STATE_OFF, 0) return True elif state == 1: if 1 != self.state_value: self.set_state(STATE_ON, 1) return True return False def control(self, controller, command, value=None): if command.id == COMMAND_ON.id: controller.send_message(self.unique_id, [chr(0), chr(1)]) self.log_command('Turning the power on') return elif command.id == COMMAND_OFF.id: controller.send_message(self.unique_id, [chr(0), chr(0)]) self.log_command('Turning the power off') return Entity.control(self, command, value=value) def describe_state(self): return str(self.state) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class GenericPower(Entity): """ This type of entites are able to report their states as logical on (0x01) or off (0x00) state, and accept commands to switch this state. 
""" def __init__(self, unique_id, entity_type=EntityType.find(100), name= 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0 ): Entity.__init__(self, unique_id, entity_type, name=name, state= state, state_value=state_value, last_checkin=last_checkin) def state_changed(self, state_message): Entity.state_changed(self, state_message) state = state_message[0] if state == 0: if 0 != self.state_value: self.set_state(STATE_OFF, 0) return True elif state == 1: if 1 != self.state_value: self.set_state(STATE_ON, 1) return True return False def control(self, controller, command, value=None): if command.id == COMMAND_ON.id: controller.send_message(self.unique_id, [chr(0), chr(1)]) self.log_command('Turning the power on') return elif command.id == COMMAND_OFF.id: controller.send_message(self.unique_id, [chr(0), chr(0)]) self.log_command('Turning the power off') return Entity.control(self, command, value=value) def describe_state(self): return str(self.state) EntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF], '#99CC00', 'power.png') <|reserved_special_token_1|> <|reserved_special_token_0|> from entities import Entity, EntityType from entities import STATE_UNKNOWN, STATE_OFF, STATE_ON from entities import COMMAND_ON, COMMAND_OFF class GenericPower(Entity): """ This type of entites are able to report their states as logical on (0x01) or off (0x00) state, and accept commands to switch this state. """ def __init__(self, unique_id, entity_type=EntityType.find(100), name= 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0 ): Entity.__init__(self, unique_id, entity_type, name=name, state= state, state_value=state_value, last_checkin=last_checkin) def state_changed(self, state_message): Entity.state_changed(self, state_message) state = state_message[0] if state == 0: if 0 != self.state_value: self.set_state(STATE_OFF, 0) return True elif state == 1: if 1 != self.state_value: self.set_state(STATE_ON, 1) return True return False def control(self, controller, command, value=None): if command.id == COMMAND_ON.id: controller.send_message(self.unique_id, [chr(0), chr(1)]) self.log_command('Turning the power on') return elif command.id == COMMAND_OFF.id: controller.send_message(self.unique_id, [chr(0), chr(0)]) self.log_command('Turning the power off') return Entity.control(self, command, value=value) def describe_state(self): return str(self.state) EntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF], '#99CC00', 'power.png') <|reserved_special_token_1|> ''' Created on Dec 2, 2013 A reference entity implementation for Power devices that can be controlled via RF communication. @author: rycus ''' from entities import Entity, EntityType from entities import STATE_UNKNOWN, STATE_OFF, STATE_ON from entities import COMMAND_ON, COMMAND_OFF class GenericPower(Entity): ''' This type of entites are able to report their states as logical on (0x01) or off (0x00) state, and accept commands to switch this state. 
''' def __init__(self, unique_id, entity_type=EntityType.find(100), name='Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0): Entity.__init__(self, unique_id, entity_type, name=name, state=state, state_value=state_value, last_checkin=last_checkin) def state_changed(self, state_message): Entity.state_changed(self, state_message) state = state_message[0] if state == 0x00: if 0 != self.state_value: self.set_state(STATE_OFF, 0) return True elif state == 0x01: if 1 != self.state_value: self.set_state(STATE_ON, 1) return True return False def control(self, controller, command, value=None): if command.id == COMMAND_ON.id: controller.send_message(self.unique_id, [ chr(0x00), chr(0x01) ]) self.log_command('Turning the power on') return elif command.id == COMMAND_OFF.id: controller.send_message(self.unique_id, [ chr(0x00), chr(0x00) ]) self.log_command('Turning the power off') return Entity.control(self, command, value=value) def describe_state(self): return str(self.state) # register type EntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF], '#99CC00', 'power.png')
flexible
{ "blob_id": "18e76df1693d4fc27620a0cf491c33197caa5d15", "index": 4055, "step-1": "<mask token>\n\n\nclass GenericPower(Entity):\n <mask token>\n\n def __init__(self, unique_id, entity_type=EntityType.find(100), name=\n 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0\n ):\n Entity.__init__(self, unique_id, entity_type, name=name, state=\n state, state_value=state_value, last_checkin=last_checkin)\n\n def state_changed(self, state_message):\n Entity.state_changed(self, state_message)\n state = state_message[0]\n if state == 0:\n if 0 != self.state_value:\n self.set_state(STATE_OFF, 0)\n return True\n elif state == 1:\n if 1 != self.state_value:\n self.set_state(STATE_ON, 1)\n return True\n return False\n\n def control(self, controller, command, value=None):\n if command.id == COMMAND_ON.id:\n controller.send_message(self.unique_id, [chr(0), chr(1)])\n self.log_command('Turning the power on')\n return\n elif command.id == COMMAND_OFF.id:\n controller.send_message(self.unique_id, [chr(0), chr(0)])\n self.log_command('Turning the power off')\n return\n Entity.control(self, command, value=value)\n\n def describe_state(self):\n return str(self.state)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass GenericPower(Entity):\n \"\"\" This type of entites are able to report their states as logical\n on (0x01) or off (0x00) state, and accept commands to switch this state. \"\"\"\n\n def __init__(self, unique_id, entity_type=EntityType.find(100), name=\n 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0\n ):\n Entity.__init__(self, unique_id, entity_type, name=name, state=\n state, state_value=state_value, last_checkin=last_checkin)\n\n def state_changed(self, state_message):\n Entity.state_changed(self, state_message)\n state = state_message[0]\n if state == 0:\n if 0 != self.state_value:\n self.set_state(STATE_OFF, 0)\n return True\n elif state == 1:\n if 1 != self.state_value:\n self.set_state(STATE_ON, 1)\n return True\n return False\n\n def control(self, controller, command, value=None):\n if command.id == COMMAND_ON.id:\n controller.send_message(self.unique_id, [chr(0), chr(1)])\n self.log_command('Turning the power on')\n return\n elif command.id == COMMAND_OFF.id:\n controller.send_message(self.unique_id, [chr(0), chr(0)])\n self.log_command('Turning the power off')\n return\n Entity.control(self, command, value=value)\n\n def describe_state(self):\n return str(self.state)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass GenericPower(Entity):\n \"\"\" This type of entites are able to report their states as logical\n on (0x01) or off (0x00) state, and accept commands to switch this state. 
\"\"\"\n\n def __init__(self, unique_id, entity_type=EntityType.find(100), name=\n 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0\n ):\n Entity.__init__(self, unique_id, entity_type, name=name, state=\n state, state_value=state_value, last_checkin=last_checkin)\n\n def state_changed(self, state_message):\n Entity.state_changed(self, state_message)\n state = state_message[0]\n if state == 0:\n if 0 != self.state_value:\n self.set_state(STATE_OFF, 0)\n return True\n elif state == 1:\n if 1 != self.state_value:\n self.set_state(STATE_ON, 1)\n return True\n return False\n\n def control(self, controller, command, value=None):\n if command.id == COMMAND_ON.id:\n controller.send_message(self.unique_id, [chr(0), chr(1)])\n self.log_command('Turning the power on')\n return\n elif command.id == COMMAND_OFF.id:\n controller.send_message(self.unique_id, [chr(0), chr(0)])\n self.log_command('Turning the power off')\n return\n Entity.control(self, command, value=value)\n\n def describe_state(self):\n return str(self.state)\n\n\nEntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF],\n '#99CC00', 'power.png')\n", "step-4": "<mask token>\nfrom entities import Entity, EntityType\nfrom entities import STATE_UNKNOWN, STATE_OFF, STATE_ON\nfrom entities import COMMAND_ON, COMMAND_OFF\n\n\nclass GenericPower(Entity):\n \"\"\" This type of entites are able to report their states as logical\n on (0x01) or off (0x00) state, and accept commands to switch this state. \"\"\"\n\n def __init__(self, unique_id, entity_type=EntityType.find(100), name=\n 'Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0\n ):\n Entity.__init__(self, unique_id, entity_type, name=name, state=\n state, state_value=state_value, last_checkin=last_checkin)\n\n def state_changed(self, state_message):\n Entity.state_changed(self, state_message)\n state = state_message[0]\n if state == 0:\n if 0 != self.state_value:\n self.set_state(STATE_OFF, 0)\n return True\n elif state == 1:\n if 1 != self.state_value:\n self.set_state(STATE_ON, 1)\n return True\n return False\n\n def control(self, controller, command, value=None):\n if command.id == COMMAND_ON.id:\n controller.send_message(self.unique_id, [chr(0), chr(1)])\n self.log_command('Turning the power on')\n return\n elif command.id == COMMAND_OFF.id:\n controller.send_message(self.unique_id, [chr(0), chr(0)])\n self.log_command('Turning the power off')\n return\n Entity.control(self, command, value=value)\n\n def describe_state(self):\n return str(self.state)\n\n\nEntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF],\n '#99CC00', 'power.png')\n", "step-5": "'''\nCreated on Dec 2, 2013\n\nA reference entity implementation for Power devices\nthat can be controlled via RF communication.\n\n@author: rycus\n'''\n\nfrom entities import Entity, EntityType\nfrom entities import STATE_UNKNOWN, STATE_OFF, STATE_ON\nfrom entities import COMMAND_ON, COMMAND_OFF\n\nclass GenericPower(Entity):\n ''' This type of entites are able to report their states as logical\n on (0x01) or off (0x00) state, and accept commands to switch this state. 
'''\n \n def __init__(self, unique_id, entity_type=EntityType.find(100), name='Unnamed entity', state=STATE_UNKNOWN, state_value=None, last_checkin=0):\n Entity.__init__(self, unique_id, entity_type, name=name, state=state, state_value=state_value, last_checkin=last_checkin)\n\n def state_changed(self, state_message):\n Entity.state_changed(self, state_message)\n \n state = state_message[0]\n if state == 0x00:\n if 0 != self.state_value:\n self.set_state(STATE_OFF, 0)\n return True\n elif state == 0x01:\n if 1 != self.state_value:\n self.set_state(STATE_ON, 1)\n return True\n \n return False\n\n def control(self, controller, command, value=None):\n if command.id == COMMAND_ON.id:\n controller.send_message(self.unique_id, [ chr(0x00), chr(0x01) ])\n self.log_command('Turning the power on')\n return\n elif command.id == COMMAND_OFF.id:\n controller.send_message(self.unique_id, [ chr(0x00), chr(0x00) ])\n self.log_command('Turning the power off')\n return \n \n Entity.control(self, command, value=value)\n \n def describe_state(self):\n return str(self.state)\n\n# register type\nEntityType.register(100, 'Power', GenericPower, [COMMAND_ON, COMMAND_OFF], '#99CC00', 'power.png')\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def test_duration(): ins = main.convert() names = ins.multiconvert() for name in names: induration, outduration = ins.ffprobe(name[0], name[1]) assert induration == approx(outduration) induration, outduration = ins.ffprobe(name[0], name[2]) assert induration == approx(outduration) print('All files are converted successfully!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def test_duration(): ins = main.convert() names = ins.multiconvert() for name in names: induration, outduration = ins.ffprobe(name[0], name[1]) assert induration == approx(outduration) induration, outduration = ins.ffprobe(name[0], name[2]) assert induration == approx(outduration) print('All files are converted successfully!') if __name__ == '__main__': test_duration() <|reserved_special_token_1|> import main from pytest import approx def test_duration(): ins = main.convert() names = ins.multiconvert() for name in names: induration, outduration = ins.ffprobe(name[0], name[1]) assert induration == approx(outduration) induration, outduration = ins.ffprobe(name[0], name[2]) assert induration == approx(outduration) print('All files are converted successfully!') if __name__ == '__main__': test_duration() <|reserved_special_token_1|> import main from pytest import approx def test_duration(): ins = main.convert() names = ins.multiconvert() for name in names: induration, outduration = ins.ffprobe(name[0], name[1]) assert induration == approx(outduration) induration, outduration = ins.ffprobe(name[0], name[2]) assert induration == approx(outduration) print("All files are converted successfully!") if __name__ == '__main__': test_duration()
flexible
{ "blob_id": "92c247b827d2ca4dce9b631a2c09f2800aabe216", "index": 6129, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_duration():\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print('All files are converted successfully!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_duration():\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print('All files are converted successfully!')\n\n\nif __name__ == '__main__':\n test_duration()\n", "step-4": "import main\nfrom pytest import approx\n\n\ndef test_duration():\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print('All files are converted successfully!')\n\n\nif __name__ == '__main__':\n test_duration()\n", "step-5": "import main\nfrom pytest import approx\n\ndef test_duration():\n\n ins = main.convert()\n names = ins.multiconvert()\n for name in names:\n induration, outduration = ins.ffprobe(name[0], name[1])\n assert induration == approx(outduration)\n induration, outduration = ins.ffprobe(name[0], name[2])\n assert induration == approx(outduration)\n print(\"All files are converted successfully!\")\n\nif __name__ == '__main__':\n test_duration()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
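# Editor's note (not part of the original record): pytest.approx defaults to a
# relative tolerance of 1e-6, which can be stricter than container-level duration
# metadata warrants. A hedged variant of the same assertion with an explicit
# absolute tolerance; the 50 ms figure is an assumption, not from the project.
from pytest import approx

def assert_durations_close(induration, outduration):
    assert outduration == approx(induration, abs=0.05)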
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'test1.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(670, 483)
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(190, 240, 101, 31))
        self.pushButton.setObjectName("pushButton")
        self.lineEdit = QtWidgets.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(260, 150, 201, 41))
        self.lineEdit.setObjectName("lineEdit")
        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(100, 330, 421, 91))
        font = QtGui.QFont()
        font.setPointSize(18)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setText("")
        self.label_2.setObjectName("label_2")
        self.label_1 = QtWidgets.QLabel(Dialog)
        self.label_1.setGeometry(QtCore.QRect(90, 150, 81, 41))
        self.label_1.setObjectName("label_1")

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "OK"))
        self.label_1.setText(_translate("Dialog", "Name"))
flexible
{ "blob_id": "3222dd7c2d19d86f2e085cb489ab4a48307ba132", "index": 7458, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Ui_Dialog(object):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(670, 483)\n self.pushButton = QtWidgets.QPushButton(Dialog)\n self.pushButton.setGeometry(QtCore.QRect(190, 240, 101, 31))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit = QtWidgets.QLineEdit(Dialog)\n self.lineEdit.setGeometry(QtCore.QRect(260, 150, 201, 41))\n self.lineEdit.setObjectName('lineEdit')\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(100, 330, 421, 91))\n font = QtGui.QFont()\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setText('')\n self.label_2.setObjectName('label_2')\n self.label_1 = QtWidgets.QLabel(Dialog)\n self.label_1.setGeometry(QtCore.QRect(90, 150, 81, 41))\n self.label_1.setObjectName('label_1')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n <mask token>\n", "step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName('Dialog')\n Dialog.resize(670, 483)\n self.pushButton = QtWidgets.QPushButton(Dialog)\n self.pushButton.setGeometry(QtCore.QRect(190, 240, 101, 31))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit = QtWidgets.QLineEdit(Dialog)\n self.lineEdit.setGeometry(QtCore.QRect(260, 150, 201, 41))\n self.lineEdit.setObjectName('lineEdit')\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(100, 330, 421, 91))\n font = QtGui.QFont()\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setText('')\n self.label_2.setObjectName('label_2')\n self.label_1 = QtWidgets.QLabel(Dialog)\n self.label_1.setGeometry(QtCore.QRect(90, 150, 81, 41))\n self.label_1.setObjectName('label_1')\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate('Dialog', 'Dialog'))\n self.pushButton.setText(_translate('Dialog', 'OK'))\n self.label_1.setText(_translate('Dialog', 'Name'))\n", "step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'test1.ui'\n#\n# Created by: PyQt5 UI code generator 5.7\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.resize(670, 483)\n self.pushButton = QtWidgets.QPushButton(Dialog)\n self.pushButton.setGeometry(QtCore.QRect(190, 240, 101, 31))\n self.pushButton.setObjectName(\"pushButton\")\n self.lineEdit = QtWidgets.QLineEdit(Dialog)\n self.lineEdit.setGeometry(QtCore.QRect(260, 150, 201, 41))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(100, 330, 421, 91))\n font = QtGui.QFont()\n font.setPointSize(18)\n font.setBold(True)\n font.setWeight(75)\n self.label_2.setFont(font)\n self.label_2.setText(\"\")\n self.label_2.setObjectName(\"label_2\")\n self.label_1 = QtWidgets.QLabel(Dialog)\n self.label_1.setGeometry(QtCore.QRect(90, 150, 81, 41))\n self.label_1.setObjectName(\"label_1\")\n\n self.retranslateUi(Dialog)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\n self.pushButton.setText(_translate(\"Dialog\", \"OK\"))\n self.label_1.setText(_translate(\"Dialog\", \"Name\"))\n\n", "step-ids": [ 0, 1, 2, 4, 5 ] }
[ 0, 1, 2, 4, 5 ]
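# Editor's sketch (not part of the original record): typical way to show the
# generated form above. Uses only standard PyQt5 API plus the Ui_Dialog class
# from this record; nothing here comes from the original project.
import sys
from PyQt5 import QtWidgets

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(dialog)      # build the generated widgets onto the dialog
    dialog.show()
    sys.exit(app.exec_())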
from math import pi
from root_regula_falsi import *

# Floating sphere: find the cap height h at which the weight of the displaced
# water equals the sphere's weight (Archimedes' principle).
r = 1.0       # sphere radius
ρs = 200.0    # sphere density
ρw = 1000.0   # water density

def f(h):
    Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h)  # displaced volume of water (sphere minus the cap of height h)
    Vs = 4*pi*r**3/3                        # sphere volume
    return ρw*Vw - ρs*Vs

xr = root_regula_falsi(f, 0.0, 2*r)
normal
{ "blob_id": "3e7d2bacb15c39658ef5044685b73068deb1c145", "index": 6060, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\n<mask token>\n", "step-3": "<mask token>\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2 * r)\n", "step-4": "from math import pi\nfrom root_regula_falsi import *\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\n\ndef f(h):\n Vw = 4 * pi * r ** 3 / 3 - pi * h ** 2 / 3 * (3 * r - h)\n Vs = 4 * pi * r ** 3 / 3\n return ρw * Vw - ρs * Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2 * r)\n", "step-5": "from math import pi\nfrom root_regula_falsi import *\n\nr = 1.0\nρs = 200.0\nρw = 1000.0\n\ndef f(h):\n Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h) # displaced volume of water\n Vs = 4*pi*r**3/3\n return ρw*Vw - ρs*Vs\n\n\nxr = root_regula_falsi(f, 0.0, 2*r)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
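# Editor's check (not part of the original record): setting f(h) = 0 and dividing
# out pi/3 reduces the problem to the cubic
#     h**3 - 3*r*h**2 + 4*r**3*(1 - ρs/ρw) = 0,
# which has exactly one root in (0, 2r). root_regula_falsi is not shown in this
# record, so scipy's brentq stands in below as an assumed reference solver for
# the same bracket.
from math import pi
from scipy.optimize import brentq

r, ρs, ρw = 1.0, 200.0, 1000.0

def f(h):
    Vw = 4*pi*r**3/3 - pi*h**2/3*(3*r - h)
    return ρw*Vw - ρs*(4*pi*r**3/3)

h_ref = brentq(f, 0.0, 2*r)
print(h_ref)  # ≈ 1.43 for these densities: only about 20% of the sphere's volume sits below the waterline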
# Databricks notebook source
#import and create sparksession object
from pyspark.sql import SparkSession
spark=SparkSession.builder.appName('rc').getOrCreate()

# COMMAND ----------

#import the required functions and libraries
from pyspark.sql.functions import *

# COMMAND ----------

# Convert csv file to Spark DataFrame (Databricks version)
def loadDataFrame(fileName, fileSchema):
    return (spark.read.format("csv")
            .schema(fileSchema)
            .option("header", "true")
            .option("mode", "DROPMALFORMED")
            .csv("/FileStore/tables/%s" % (fileName)))

# COMMAND ----------

from pyspark.sql.types import *

movieRatingSchema = StructType([
    StructField("userId", IntegerType(), True),
    StructField("movieId", IntegerType(), True),
    StructField("rating", FloatType(), True),
    StructField("timestamp", StringType(), True)])

movieSchema = StructType([
    StructField("movieId", IntegerType(), True),
    StructField("title", StringType(), True),
    StructField("genres", StringType(), True)])

MovieRatingsDF = loadDataFrame("ratings.csv", movieRatingSchema).cache()
MoviesDF = loadDataFrame("movies.csv", movieSchema).cache()

# COMMAND ----------

#load the dataset and create spark dataframe
df = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title', 'rating'])

#df=spark.read.csv('movie_ratings_df.csv',inferSchema=True,header=True)

# COMMAND ----------

#validate the shape of the data
print((df.count(),len(df.columns)))

# COMMAND ----------

#check columns in dataframe
df.printSchema()

# COMMAND ----------

#validate few rows of dataframe in random order
df.orderBy(rand()).show(10,False)

# COMMAND ----------

#check number of ratings by each user
df.groupBy('userId').count().orderBy('count',ascending=False).show(10,False)

# COMMAND ----------

#check number of ratings by each user
df.groupBy('userId').count().orderBy('count',ascending=True).show(10,False)

# COMMAND ----------

#number of times each movie has been rated
df.groupBy('title').count().orderBy('count',ascending=False).show(10,False)

# COMMAND ----------

df.groupBy('title').count().orderBy('count',ascending=True).show(10,False)

# COMMAND ----------

#import String indexer to convert string values to numeric values
from pyspark.ml.feature import StringIndexer,IndexToString

# COMMAND ----------

#creating string indexer to convert the movie title column values into numerical values
stringIndexer = StringIndexer(inputCol="title", outputCol="title_new")

# COMMAND ----------

#applying stringindexer object on dataframe movie title column
model = stringIndexer.fit(df)

# COMMAND ----------

#creating new dataframe with transformed values
indexed = model.transform(df)

# COMMAND ----------

#validate the numerical title values
indexed.show(10)

# COMMAND ----------

#number of times each numerical movie title has been rated
indexed.groupBy('title_new').count().orderBy('count',ascending=False).show(10,False)

# COMMAND ----------

#split the data into training and test dataset
train,test=indexed.randomSplit([0.75,0.25])

# COMMAND ----------

#count number of records in train set
train.count()

# COMMAND ----------

#count number of records in test set
test.count()

# COMMAND ----------

#import ALS recommender function from pyspark ml library
from pyspark.ml.recommendation import ALS

# COMMAND ----------

#Training the recommender model using train dataset
rec=ALS(maxIter=10,regParam=0.01,userCol='userId',itemCol='title_new',ratingCol='rating',nonnegative=True,coldStartStrategy="drop")

# COMMAND ----------

#fit the model on train set
rec_model=rec.fit(train)

# COMMAND ----------

#making predictions on test set
predicted_ratings=rec_model.transform(test)

# COMMAND ----------

#columns in predicted ratings dataframe
predicted_ratings.printSchema()

# COMMAND ----------

#predicted vs actual ratings for test set
predicted_ratings.orderBy(rand()).show(10)

# COMMAND ----------

#importing Regression Evaluator to measure RMSE
from pyspark.ml.evaluation import RegressionEvaluator

# COMMAND ----------

#create Regressor evaluator object for measuring accuracy
evaluator=RegressionEvaluator(metricName='rmse',predictionCol='prediction',labelCol='rating')

# COMMAND ----------

#apply the RE on predictions dataframe to calculate RMSE
rmse=evaluator.evaluate(predicted_ratings)

# COMMAND ----------

#print RMSE error
print(rmse)

# COMMAND ----------

#Recommend top movies which user might like

# COMMAND ----------

#create dataset of all distinct movies
unique_movies=indexed.select('title_new').distinct()

# COMMAND ----------

#number of unique movies
unique_movies.count()

# COMMAND ----------

#assigning alias name 'a' to unique movies df
a = unique_movies.alias('a')

# COMMAND ----------

user_id=85

# COMMAND ----------

#creating another dataframe which contains already watched movie by active user
watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new').distinct()

# COMMAND ----------

#number of movies already rated
watched_movies.count()

# COMMAND ----------

#assigning alias name 'b' to watched movies df
b=watched_movies.alias('b')

# COMMAND ----------

#joining both tables on left join
total_movies = a.join(b, a.title_new == b.title_new,how='left')

# COMMAND ----------

total_movies.show(10,False)

# COMMAND ----------

#selecting movies which active user is yet to rate or watch
remaining_movies=total_movies.where(col("b.title_new").isNull()).select(a.title_new).distinct()

# COMMAND ----------

#number of movies user is yet to rate
remaining_movies.count()

# COMMAND ----------

#adding new column of user_Id of active user to remaining movies df
remaining_movies=remaining_movies.withColumn("userId",lit(int(user_id)))

# COMMAND ----------

remaining_movies.show(10,False)

# COMMAND ----------

#making recommendations using ALS recommender model and selecting only top 'n' movies
recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False)

# COMMAND ----------

recommendations.show(5,False)

# COMMAND ----------

#converting title_new values back to movie titles
movie_title = IndexToString(inputCol="title_new", outputCol="title",labels=model.labels)

final_recommendations=movie_title.transform(recommendations)

# COMMAND ----------

final_recommendations.show(10,False)

# COMMAND ----------

#create function to recommend top 'n' movies to any particular user
def top_movies(user_id,n):
    """
    This function returns the top 'n' movies that user has not seen yet but might like

    """
    #assigning alias name 'a' to unique movies df
    a = unique_movies.alias('a')

    #creating another dataframe which contains already watched movie by active user
    watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new')

    #assigning alias name 'b' to watched movies df
    b=watched_movies.alias('b')

    #joining both tables on left join
    total_movies = a.join(b, a.title_new == b.title_new,how='left')

    #selecting movies which active user is yet to rate or watch
    remaining_movies=total_movies.where(col("b.title_new").isNull()).select(a.title_new).distinct()

    #adding new column of user_Id of active user to remaining movies df
    remaining_movies=remaining_movies.withColumn("userId",lit(int(user_id)))

    #making recommendations using ALS recommender model and selecting only top 'n' movies
    recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False).limit(n)

    #adding columns of movie titles in recommendations
    movie_title = IndexToString(inputCol="title_new", outputCol="title",labels=model.labels)
    final_recommendations=movie_title.transform(recommendations)

    #return the recommendations to active user
    return final_recommendations.show(n,False)

# COMMAND ----------

top_movies(85,10)

# COMMAND ----------
flexible
{ "blob_id": "d22ebe24605065452ae35c44367ee21a726ae7a1", "index": 1892, "step-1": "<mask token>\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\n<mask token>\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\n<mask token>\nprint((df.count(), len(df.columns)))\ndf.printSchema()\ndf.orderBy(rand()).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=True).show(10, False)\n<mask token>\nindexed.show(10)\nindexed.groupBy('title_new').count().orderBy('count', ascending=False).show(\n 10, False)\n<mask token>\ntrain.count()\ntest.count()\n<mask token>\npredicted_ratings.printSchema()\npredicted_ratings.orderBy(rand()).show(10)\n<mask token>\nprint(rmse)\n<mask token>\nunique_movies.count()\n<mask token>\nwatched_movies.count()\n<mask token>\ntotal_movies.show(10, False)\n<mask token>\nremaining_movies.count()\n<mask token>\nremaining_movies.show(10, False)\n<mask token>\nrecommendations.show(5, False)\n<mask token>\nfinal_recommendations.show(10, False)\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\ntop_movies(85, 10)\n", "step-3": "<mask token>\nspark = SparkSession.builder.appName('rc').getOrCreate()\n<mask token>\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 
'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\n<mask token>\nmovieRatingSchema = StructType([StructField('userId', IntegerType(), True),\n StructField('movieId', IntegerType(), True), StructField('rating',\n FloatType(), True), StructField('timestamp', StringType(), True)])\nmovieSchema = StructType([StructField('movieId', IntegerType(), True),\n StructField('title', StringType(), True), StructField('genres',\n StringType(), True)])\nMovieRatingsDF = loadDataFrame('ratings.csv', movieRatingSchema).cache()\nMoviesDF = loadDataFrame('movies.csv', movieSchema).cache()\ndf = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title',\n 'rating'])\nprint((df.count(), len(df.columns)))\ndf.printSchema()\ndf.orderBy(rand()).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=True).show(10, False)\n<mask token>\nstringIndexer = StringIndexer(inputCol='title', outputCol='title_new')\nmodel = stringIndexer.fit(df)\nindexed = model.transform(df)\nindexed.show(10)\nindexed.groupBy('title_new').count().orderBy('count', ascending=False).show(\n 10, False)\ntrain, test = indexed.randomSplit([0.75, 0.25])\ntrain.count()\ntest.count()\n<mask token>\nrec = ALS(maxIter=10, regParam=0.01, userCol='userId', itemCol='title_new',\n ratingCol='rating', nonnegative=True, coldStartStrategy='drop')\nrec_model = rec.fit(train)\npredicted_ratings = rec_model.transform(test)\npredicted_ratings.printSchema()\npredicted_ratings.orderBy(rand()).show(10)\n<mask token>\nevaluator = RegressionEvaluator(metricName='rmse', predictionCol=\n 'prediction', labelCol='rating')\nrmse = evaluator.evaluate(predicted_ratings)\nprint(rmse)\nunique_movies = indexed.select('title_new').distinct()\nunique_movies.count()\na = unique_movies.alias('a')\nuser_id = 85\nwatched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new').distinct()\nwatched_movies.count()\nb = watched_movies.alias('b')\ntotal_movies = a.join(b, a.title_new == b.title_new, how='left')\ntotal_movies.show(10, False)\nremaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\nremaining_movies.count()\nremaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\nremaining_movies.show(10, False)\nrecommendations = rec_model.transform(remaining_movies).orderBy('prediction',\n ascending=False)\nrecommendations.show(5, False)\nmovie_title = IndexToString(inputCol='title_new', outputCol='title', labels\n =model.labels)\nfinal_recommendations = movie_title.transform(recommendations)\nfinal_recommendations.show(10, False)\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', 
outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\ntop_movies(85, 10)\n", "step-4": "from pyspark.sql import SparkSession\nspark = SparkSession.builder.appName('rc').getOrCreate()\nfrom pyspark.sql.functions import *\n\n\ndef loadDataFrame(fileName, fileSchema):\n return spark.read.format('csv').schema(fileSchema).option('header', 'true'\n ).option('mode', 'DROPMALFORMED').csv('/FileStore/tables/%s' % fileName\n )\n\n\nfrom pyspark.sql.types import *\nmovieRatingSchema = StructType([StructField('userId', IntegerType(), True),\n StructField('movieId', IntegerType(), True), StructField('rating',\n FloatType(), True), StructField('timestamp', StringType(), True)])\nmovieSchema = StructType([StructField('movieId', IntegerType(), True),\n StructField('title', StringType(), True), StructField('genres',\n StringType(), True)])\nMovieRatingsDF = loadDataFrame('ratings.csv', movieRatingSchema).cache()\nMoviesDF = loadDataFrame('movies.csv', movieSchema).cache()\ndf = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title',\n 'rating'])\nprint((df.count(), len(df.columns)))\ndf.printSchema()\ndf.orderBy(rand()).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('userId').count().orderBy('count', ascending=True).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=False).show(10, False)\ndf.groupBy('title').count().orderBy('count', ascending=True).show(10, False)\nfrom pyspark.ml.feature import StringIndexer, IndexToString\nstringIndexer = StringIndexer(inputCol='title', outputCol='title_new')\nmodel = stringIndexer.fit(df)\nindexed = model.transform(df)\nindexed.show(10)\nindexed.groupBy('title_new').count().orderBy('count', ascending=False).show(\n 10, False)\ntrain, test = indexed.randomSplit([0.75, 0.25])\ntrain.count()\ntest.count()\nfrom pyspark.ml.recommendation import ALS\nrec = ALS(maxIter=10, regParam=0.01, userCol='userId', itemCol='title_new',\n ratingCol='rating', nonnegative=True, coldStartStrategy='drop')\nrec_model = rec.fit(train)\npredicted_ratings = rec_model.transform(test)\npredicted_ratings.printSchema()\npredicted_ratings.orderBy(rand()).show(10)\nfrom pyspark.ml.evaluation import RegressionEvaluator\nevaluator = RegressionEvaluator(metricName='rmse', predictionCol=\n 'prediction', labelCol='rating')\nrmse = evaluator.evaluate(predicted_ratings)\nprint(rmse)\nunique_movies = indexed.select('title_new').distinct()\nunique_movies.count()\na = unique_movies.alias('a')\nuser_id = 85\nwatched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new').distinct()\nwatched_movies.count()\nb = watched_movies.alias('b')\ntotal_movies = a.join(b, a.title_new == b.title_new, how='left')\ntotal_movies.show(10, False)\nremaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\nremaining_movies.count()\nremaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\nremaining_movies.show(10, False)\nrecommendations = rec_model.transform(remaining_movies).orderBy('prediction',\n ascending=False)\nrecommendations.show(5, False)\nmovie_title = IndexToString(inputCol='title_new', outputCol='title', labels\n =model.labels)\nfinal_recommendations = movie_title.transform(recommendations)\nfinal_recommendations.show(10, False)\n\n\ndef top_movies(user_id, n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet 
but might like \n \n \"\"\"\n a = unique_movies.alias('a')\n watched_movies = indexed.filter(indexed['userId'] == user_id).select(\n 'title_new')\n b = watched_movies.alias('b')\n total_movies = a.join(b, a.title_new == b.title_new, how='left')\n remaining_movies = total_movies.where(col('b.title_new').isNull()).select(a\n .title_new).distinct()\n remaining_movies = remaining_movies.withColumn('userId', lit(int(user_id)))\n recommendations = rec_model.transform(remaining_movies).orderBy(\n 'prediction', ascending=False).limit(n)\n movie_title = IndexToString(inputCol='title_new', outputCol='title',\n labels=model.labels)\n final_recommendations = movie_title.transform(recommendations)\n return final_recommendations.show(n, False)\n\n\ntop_movies(85, 10)\n", "step-5": "# Databricks notebook source\n#import and create sparksession object\nfrom pyspark.sql import SparkSession \nspark=SparkSession.builder.appName('rc').getOrCreate()\n\n# COMMAND ----------\n\n#import the required functions and libraries\nfrom pyspark.sql.functions import *\n\n# COMMAND ----------\n\n# Convert csv file to Spark DataFrame (Databricks version)\ndef loadDataFrame(fileName, fileSchema):\n return (spark.read.format(\"csv\")\n .schema(fileSchema)\n .option(\"header\", \"true\")\n .option(\"mode\", \"DROPMALFORMED\")\n .csv(\"/FileStore/tables/%s\" % (fileName)))\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import *\n\nmovieRatingSchema = StructType([\n StructField(\"userId\", IntegerType(), True),\n StructField(\"movieId\", IntegerType(), True),\n StructField(\"rating\", FloatType(), True),\n StructField(\"timestamp\", StringType(), True)])\n\nmovieSchema = StructType([\n StructField(\"movieId\", IntegerType(), True),\n StructField(\"title\", StringType(), True),\n StructField(\"genres\", StringType(), True)])\n\nMovieRatingsDF = loadDataFrame(\"ratings.csv\", movieRatingSchema).cache()\nMoviesDF = loadDataFrame(\"movies.csv\", movieSchema).cache()\n\n# COMMAND ----------\n\n#load the dataset and create sprk dataframe\ndf = MovieRatingsDF.join(MoviesDF, 'movieId').select(['userId', 'title', 'rating'])\n\n\n#df=spark.read.csv('movie_ratings_df.csv',inferSchema=True,header=True)\n\n# COMMAND ----------\n\n#validate the shape of the data \nprint((df.count(),len(df.columns)))\n\n# COMMAND ----------\n\n#check columns in dataframe\ndf.printSchema()\n\n# COMMAND ----------\n\n#validate few rows of dataframe in random order\ndf.orderBy(rand()).show(10,False)\n\n# COMMAND ----------\n\n#check number of ratings by each user\ndf.groupBy('userId').count().orderBy('count',ascending=False).show(10,False)\n\n# COMMAND ----------\n\n#check number of ratings by each user\ndf.groupBy('userId').count().orderBy('count',ascending=True).show(10,False)\n\n# COMMAND ----------\n\n#number of times movie been rated \ndf.groupBy('title').count().orderBy('count',ascending=False).show(10,False)\n\n# COMMAND ----------\n\ndf.groupBy('title').count().orderBy('count',ascending=True).show(10,False)\n\n# COMMAND ----------\n\n#import String indexer to convert string values to numeric values\nfrom pyspark.ml.feature import StringIndexer,IndexToString\n\n# COMMAND ----------\n\n#creating string indexer to convert the movie title column values into numerical values\nstringIndexer = StringIndexer(inputCol=\"title\", outputCol=\"title_new\")\n\n# COMMAND ----------\n\n#applying stringindexer object on dataframe movie title column\nmodel = stringIndexer.fit(df)\n\n# COMMAND ----------\n\n#creating new dataframe with transformed values\nindexed = 
model.transform(df)\n\n# COMMAND ----------\n\n#validate the numerical title values\nindexed.show(10)\n\n# COMMAND ----------\n\n#number of times each numerical movie title has been rated \nindexed.groupBy('title_new').count().orderBy('count',ascending=False).show(10,False)\n\n# COMMAND ----------\n\n#split the data into training and test datatset\ntrain,test=indexed.randomSplit([0.75,0.25])\n\n# COMMAND ----------\n\n#count number of records in train set\ntrain.count()\n\n# COMMAND ----------\n\n#count number of records in test set\ntest.count()\n\n# COMMAND ----------\n\n#import ALS recommender function from pyspark ml library\nfrom pyspark.ml.recommendation import ALS\n\n# COMMAND ----------\n\n#Training the recommender model using train datatset\nrec=ALS(maxIter=10,regParam=0.01,userCol='userId',itemCol='title_new',ratingCol='rating',nonnegative=True,coldStartStrategy=\"drop\")\n\n# COMMAND ----------\n\n#fit the model on train set\nrec_model=rec.fit(train)\n\n# COMMAND ----------\n\n#making predictions on test set \npredicted_ratings=rec_model.transform(test)\n\n# COMMAND ----------\n\n#columns in predicted ratings dataframe\npredicted_ratings.printSchema()\n\n# COMMAND ----------\n\n#predicted vs actual ratings for test set \npredicted_ratings.orderBy(rand()).show(10)\n\n# COMMAND ----------\n\n#importing Regression Evaluator to measure RMSE\nfrom pyspark.ml.evaluation import RegressionEvaluator\n\n# COMMAND ----------\n\n#create Regressor evaluator object for measuring accuracy\nevaluator=RegressionEvaluator(metricName='rmse',predictionCol='prediction',labelCol='rating')\n\n# COMMAND ----------\n\n#apply the RE on predictions dataframe to calculate RMSE\nrmse=evaluator.evaluate(predicted_ratings)\n\n# COMMAND ----------\n\n#print RMSE error\nprint(rmse)\n\n# COMMAND ----------\n\n#Recommend top movies which user might like \n\n# COMMAND ----------\n\n#create dataset of all distinct movies \nunique_movies=indexed.select('title_new').distinct()\n\n# COMMAND ----------\n\n#number of unique movies\nunique_movies.count()\n\n# COMMAND ----------\n\n#assigning alias name 'a' to unique movies df\na = unique_movies.alias('a')\n\n# COMMAND ----------\n\nuser_id=85\n\n# COMMAND ----------\n\n#creating another dataframe which contains already watched movie by active user \nwatched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new').distinct()\n\n# COMMAND ----------\n\n#number of movies already rated \nwatched_movies.count()\n\n# COMMAND ----------\n\n#assigning alias name 'b' to watched movies df\nb=watched_movies.alias('b')\n\n# COMMAND ----------\n\n#joining both tables on left join \ntotal_movies = a.join(b, a.title_new == b.title_new,how='left')\n\n\n# COMMAND ----------\n\ntotal_movies.show(10,False)\n\n# COMMAND ----------\n\n#selecting movies which active user is yet to rate or watch\nremaining_movies=total_movies.where(col(\"b.title_new\").isNull()).select(a.title_new).distinct()\n\n# COMMAND ----------\n\n#number of movies user is yet to rate \nremaining_movies.count()\n\n# COMMAND ----------\n\n#adding new column of user_Id of active useer to remaining movies df \nremaining_movies=remaining_movies.withColumn(\"userId\",lit(int(user_id)))\n\n\n# COMMAND ----------\n\nremaining_movies.show(10,False)\n\n# COMMAND ----------\n\n#making recommendations using ALS recommender model and selecting only top 'n' movies\nrecommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False)\n\n# COMMAND ----------\n\nrecommendations.show(5,False)\n\n# 
COMMAND ----------\n\n#converting title_new values back to movie titles\nmovie_title = IndexToString(inputCol=\"title_new\", outputCol=\"title\",labels=model.labels)\n\nfinal_recommendations=movie_title.transform(recommendations)\n\n\n# COMMAND ----------\n\nfinal_recommendations.show(10,False)\n\n# COMMAND ----------\n\n#create function to recommend top 'n' movies to any particular user\ndef top_movies(user_id,n):\n \"\"\"\n This function returns the top 'n' movies that user has not seen yet but might like \n \n \"\"\"\n #assigning alias name 'a' to unique movies df\n a = unique_movies.alias('a')\n \n #creating another dataframe which contains already watched movie by active user \n watched_movies=indexed.filter(indexed['userId'] == user_id).select('title_new')\n \n #assigning alias name 'b' to watched movies df\n b=watched_movies.alias('b')\n \n #joining both tables on left join \n total_movies = a.join(b, a.title_new == b.title_new,how='left')\n \n #selecting movies which active user is yet to rate or watch\n remaining_movies=total_movies.where(col(\"b.title_new\").isNull()).select(a.title_new).distinct()\n \n \n #adding new column of user_Id of active useer to remaining movies df \n remaining_movies=remaining_movies.withColumn(\"userId\",lit(int(user_id)))\n \n \n #making recommendations using ALS recommender model and selecting only top 'n' movies\n recommendations=rec_model.transform(remaining_movies).orderBy('prediction',ascending=False).limit(n)\n \n \n #adding columns of movie titles in recommendations\n movie_title = IndexToString(inputCol=\"title_new\", outputCol=\"title\",labels=model.labels)\n final_recommendations=movie_title.transform(recommendations)\n \n #return the recommendations to active user\n return final_recommendations.show(n,False)\n\n# COMMAND ----------\n\ntop_movies(85,10)\n\n# COMMAND ----------\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
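# Editor's note (not part of the original record): RegressionEvaluator supports
# metrics other than RMSE. A hedged sketch reusing the prediction frame from the
# notebook above; `predicted_ratings` is assumed to still be in scope.
from pyspark.ml.evaluation import RegressionEvaluator

mae_evaluator = RegressionEvaluator(metricName='mae', predictionCol='prediction', labelCol='rating')
mae = mae_evaluator.evaluate(predicted_ratings)
print(mae)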
def progress_format(user):
    json = dict()
    json["progres_id"] = user[0]
    json["percentage"] = user[1]
    json["user_id"] = user[2]
    json["technology"] = user[3]
    return json

def progresses_format(users):
    json = dict()
    json["users_progresses"] = list()
    for user in users:
        json["users_progresses"].append(progress_format(user))
    return json

def progress_percentage_formating(progresses):
    response = dict()
    response['response'] = list()
    for progress in progresses:
        json = dict()
        json["name"] = progress[1]
        json["percentage"] = progress[0]
        response['response'].append(json)
    return response
normal
{ "blob_id": "6ebf6bdfc6a4a1fe49f4eed1a2c1802f8adeef08", "index": 1195, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef progresses_format(users):\n json = dict()\n json['users_progresses'] = list()\n for user in users:\n json['users_progresses'].append(progress_format(user))\n return json\n\n\n<mask token>\n", "step-3": "def progress_format(user):\n json = dict()\n json['progres_id'] = user[0]\n json['percentage'] = user[1]\n json['user_id'] = user[2]\n json['technology'] = user[3]\n return json\n\n\ndef progresses_format(users):\n json = dict()\n json['users_progresses'] = list()\n for user in users:\n json['users_progresses'].append(progress_format(user))\n return json\n\n\n<mask token>\n", "step-4": "def progress_format(user):\n json = dict()\n json['progres_id'] = user[0]\n json['percentage'] = user[1]\n json['user_id'] = user[2]\n json['technology'] = user[3]\n return json\n\n\ndef progresses_format(users):\n json = dict()\n json['users_progresses'] = list()\n for user in users:\n json['users_progresses'].append(progress_format(user))\n return json\n\n\ndef progress_percentage_formating(progresses):\n response = dict()\n response['response'] = list()\n for progress in progresses:\n json = dict()\n json['name'] = progress[1]\n json['percentage'] = progress[0]\n response['response'].append(json)\n return response\n", "step-5": "def progress_format(user):\r\n json = dict()\r\n json[\"progres_id\"] = user[0]\r\n json[\"percentage\"] = user[1]\r\n json[\"user_id\"] = user[2]\r\n json[\"technology\"] = user[3]\r\n return json\r\n\r\ndef progresses_format(users):\r\n json = dict()\r\n json[\"users_progresses\"] = list()\r\n for user in users:\r\n json[\"users_progresses\"].append(progress_format(user))\r\n return json\r\n\r\ndef progress_percentage_formating(progresses):\r\n response = dict()\r\n response['response'] = list()\r\n for progress in progresses:\r\n json = dict()\r\n json[\"name\"] = progress[1]\r\n json[\"percentage\"] = progress[0]\r\n response['response'].append(json)\r\n return response", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
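# Editor's sketch (not part of the original record): the row layouts are implied
# by the index accesses above — progress rows as (progress_id, percentage,
# user_id, technology) tuples, percentage rows as (percentage, name). The sample
# values below are made up for illustration.
rows = [(1, 40, 7, "python"), (2, 85, 7, "sql")]
print(progresses_format(rows))
# {'users_progresses': [{'progres_id': 1, 'percentage': 40, 'user_id': 7, 'technology': 'python'}, ...]}
print(progress_percentage_formating([(40, "python"), (85, "sql")]))
# {'response': [{'name': 'python', 'percentage': 40}, {'name': 'sql', 'percentage': 85}]}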
#
# @lc app=leetcode id=67 lang=python3
#
# [67] Add Binary
#
# https://leetcode.com/problems/add-binary/description/
#
# algorithms
# Easy (46.70%)
# Likes: 2566
# Dislikes: 331
# Total Accepted: 572.1K
# Total Submissions: 1.2M
# Testcase Example: '"11"\n"1"'
#
# Given two binary strings a and b, return their sum as a binary string.
#
# Example 1:
# Input: a = "11", b = "1"
# Output: "100"
# Example 2:
# Input: a = "1010", b = "1011"
# Output: "10101"
#
# Constraints:
#
# 1 <= a.length, b.length <= 10^4
# a and b consist only of '0' or '1' characters.
# Each string does not contain leading zeros except for the zero itself.
#

# @lc code=start
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        if len(a) < len(b):
            a = "0" * (len(b) - len(a)) + a
        else:
            b = "0" * (len(a) - len(b)) + b

        last_pointer = len(a) - 1
        mark = 0
        res = []
        while last_pointer >= 0:
            tmp = int(a[last_pointer]) + int(b[last_pointer]) + mark
            if tmp >= 2:
                mark = tmp // 2
                res.insert(0, str(tmp % 2))
            else:
                res.insert(0, str(tmp))
                mark = 0

            last_pointer -= 1

        if last_pointer == -1 and mark != 0:
            res.insert(0, str(mark))

        return "".join(res)

# @lc code=end
flexible
{ "blob_id": "227a56c970a74d515ab694d2c0924885e2209cfe", "index": 7089, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def addBinary(self, a: str, b: str) ->str:\n if len(a) < len(b):\n a = '0' * (len(b) - len(a)) + a\n else:\n b = '0' * (len(a) - len(b)) + b\n last_pointer = len(a) - 1\n mark = 0\n res = []\n while last_pointer >= 0:\n tmp = int(a[last_pointer]) + int(b[last_pointer]) + mark\n if tmp >= 2:\n mark = tmp // 2\n res.insert(0, str(tmp % 2))\n else:\n res.insert(0, str(tmp))\n mark = 0\n last_pointer -= 1\n if last_pointer == -1 and mark != 0:\n res.insert(0, str(mark))\n return ''.join(res)\n", "step-4": "#\n# @lc app=leetcode id=67 lang=python3\n#\n# [67] Add Binary\n#\n# https://leetcode.com/problems/add-binary/description/\n#\n# algorithms\n# Easy (46.70%)\n# Likes: 2566\n# Dislikes: 331\n# Total Accepted: 572.1K\n# Total Submissions: 1.2M\n# Testcase Example: '\"11\"\\n\"1\"'\n#\n# Given two binary strings a and b, return their sum as a binary string.\n# \n# \n# Example 1:\n# Input: a = \"11\", b = \"1\"\n# Output: \"100\"\n# Example 2:\n# Input: a = \"1010\", b = \"1011\"\n# Output: \"10101\"\n# \n# \n# Constraints:\n# \n# \n# 1 <= a.length, b.length <= 10^4\n# a and b consist only of '0' or '1' characters.\n# Each string does not contain leading zeros except for the zero itself.\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n def addBinary(self, a: str, b: str) -> str:\n if len(a) < len(b):\n a = \"0\" * (len(b) - len(a)) + a\n else:\n b = \"0\" * (len(a) - len(b)) + b\n\n last_pointer = len(a) - 1\n mark = 0\n res = []\n while last_pointer >= 0:\n tmp = int(a[last_pointer]) + int(b[last_pointer]) + mark\n if tmp >= 2:\n mark = tmp // 2\n res.insert(0, str(tmp % 2))\n else:\n res.insert(0, str(tmp))\n mark = 0\n\n\n last_pointer -= 1\n\n if last_pointer == -1 and mark != 0:\n res.insert(0, str(mark))\n\n return \"\".join(res)\n \n# @lc code=end\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
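# Editor's check (not part of the original record): Python's built-in base-2
# parsing gives a quick oracle for the manual carry logic above.
def add_binary_oracle(a: str, b: str) -> str:
    return bin(int(a, 2) + int(b, 2))[2:]

assert Solution().addBinary("11", "1") == add_binary_oracle("11", "1") == "100"
assert Solution().addBinary("1010", "1011") == add_binary_oracle("1010", "1011") == "10101"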
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from .BLWecc import (
    curve,
    setCurve,
    getPublicKey,
    getPrivateKey,
    getAddress as getAddressByCode,
    pub2add as getAddressByPublicKey,
    sign,
    verifyTx as verify,
)
flexible
{ "blob_id": "25ee13314c7cf828b8805d9f483bd5ee12073228", "index": 8004, "step-1": "<mask token>\n", "step-2": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom .BLWecc import curve, setCurve, getPublicKey, getPrivateKey, getAddress as getAddressByCode, pub2add as getAddressByPublicKey, sign, verifyTx as verify\n", "step-3": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom .BLWecc import (\ncurve,\nsetCurve,\ngetPublicKey,\ngetPrivateKey,\ngetAddress as getAddressByCode, \npub2add as getAddressByPublicKey,\nsign,\nverifyTx as verify,\n)\n\n\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import pytz
from django.utils import timezone


class TimezoneMiddleware(object):
    """Middleware that activates the request user's timezone;
    if no user timezone is available, 'UTC' is activated by default."""

    def process_request(self, request):
        user = request.user
        if hasattr(user, 'profile'):
            user_tz = user.profile.timezone
            timezone.activate(pytz.timezone(user_tz))
        else:
            timezone.activate(pytz.timezone('UTC'))
flexible
{ "blob_id": "839d4182663983a03975465d3909631bd6db1d83", "index": 9919, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass TimezoneMiddleware(object):\n <mask token>\n\n def process_request(self, request):\n user = request.user\n if hasattr(user, 'profile'):\n user_tz = user.profile.timezone\n timezone.activate(pytz.timezone(user_tz))\n else:\n timezone.activate(pytz.timezone('UTC'))\n", "step-3": "<mask token>\n\n\nclass TimezoneMiddleware(object):\n \"\"\" Middleware to get user's timezone and activate timezone \n if user timezone is not available default value 'UTC' is activated \"\"\"\n\n def process_request(self, request):\n user = request.user\n if hasattr(user, 'profile'):\n user_tz = user.profile.timezone\n timezone.activate(pytz.timezone(user_tz))\n else:\n timezone.activate(pytz.timezone('UTC'))\n", "step-4": "import pytz\nfrom django.utils import timezone\n\n\nclass TimezoneMiddleware(object):\n \"\"\" Middleware to get user's timezone and activate timezone \n if user timezone is not available default value 'UTC' is activated \"\"\"\n\n def process_request(self, request):\n user = request.user\n if hasattr(user, 'profile'):\n user_tz = user.profile.timezone\n timezone.activate(pytz.timezone(user_tz))\n else:\n timezone.activate(pytz.timezone('UTC'))\n", "step-5": null, "step-ids": [ 0, 2, 3, 4 ] }
[ 0, 2, 3, 4 ]
import numpy
from scipy.optimize import OptimizeResult

from logging import getLogger

logger = getLogger(__name__)


def minimize_neldermead(func, x0, args=(), callback=None,
                        maxiter=None, maxfev=None, disp=False,
                        return_all=False, initial_simplex=None,
                        xatol=1e-4, fatol=1e-4, **unknown_options):
    """
    Minimization of scalar function of one or more variables using the
    Nelder-Mead algorithm.
    Options
    -------
    disp : bool
        Set to True to print convergence messages.
    maxiter, maxfev : int
        Maximum allowed number of iterations and function evaluations.
        Will default to ``N*200``, where ``N`` is the number of
        variables, if neither `maxiter` or `maxfev` is set. If both
        `maxiter` and `maxfev` are set, minimization will stop at the
        first reached.
    initial_simplex : array_like of shape (N + 1, N)
        Initial simplex. If given, overrides `x0`.
        ``initial_simplex[j,:]`` should contain the coordinates of
        the j-th vertex of the ``N+1`` vertices in the simplex, where
        ``N`` is the dimension.
    xatol : float, optional
        Absolute error in xopt between iterations that is acceptable for
        convergence.
    fatol : number, optional
        Absolute error in func(xopt) between iterations that is acceptable for
        convergence.
    """
    maxfun = maxfev
    retall = return_all

    # Nelder-Mead coefficients: reflection, expansion, contraction, shrink
    rho = 1
    chi = 2
    psi = 0.5
    sigma = 0.5
    nonzdelt = 0.05
    zdelt = 0.00025

    if initial_simplex is None:
        N = len(x0)

        sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
        sim[0] = x0
        for k in range(N):
            y = numpy.array(x0, copy=True)
            if y[k] != 0:
                y[k] = (1 + nonzdelt) * y[k]
            else:
                y[k] = zdelt
            sim[k + 1] = y

    # hard-coded caps on the number of iterations and function evaluations
    # (these override the N*200 default described in the docstring)
    maxiter = 10
    maxfun = 10

    one2np1 = list(range(1, N + 1))
    fsim = numpy.zeros((N + 1,), float)

    for k in range(N + 1):
        fsim[k] = func(sim[k])

    ind = numpy.argsort(fsim)
    fsim = numpy.take(fsim, ind, 0)
    # sort so sim[0,:] has the lowest function value
    sim = numpy.take(sim, ind, 0)

    iterations = 1

    while iterations < maxiter:
        if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and
                numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):
            break
        logger.debug('itr: %s' % iterations)

        xbar = numpy.add.reduce(sim[:-1], 0) / N
        xr = (1 + rho) * xbar - rho * sim[-1]
        fxr = func(xr)
        doshrink = 0

        if fxr < fsim[0]:
            xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
            fxe = func(xe)

            if fxe < fxr:
                sim[-1] = xe
                fsim[-1] = fxe
            else:
                sim[-1] = xr
                fsim[-1] = fxr
        else:  # fsim[0] <= fxr
            if fxr < fsim[-2]:
                sim[-1] = xr
                fsim[-1] = fxr
            else:  # fxr >= fsim[-2]
                # Perform contraction
                if fxr < fsim[-1]:
                    xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
                    fxc = func(xc)

                    if fxc <= fxr:
                        sim[-1] = xc
                        fsim[-1] = fxc
                    else:
                        doshrink = 1
                else:
                    # Perform an inside contraction
                    xcc = (1 - psi) * xbar + psi * sim[-1]
                    fxcc = func(xcc)

                    if fxcc < fsim[-1]:
                        sim[-1] = xcc
                        fsim[-1] = fxcc
                    else:
                        doshrink = 1

                if doshrink:
                    for j in one2np1:
                        sim[j] = sim[0] + sigma * (sim[j] - sim[0])
                        fsim[j] = func(sim[j])

        ind = numpy.argsort(fsim)
        sim = numpy.take(sim, ind, 0)
        fsim = numpy.take(fsim, ind, 0)
        if callback is not None:
            callback(sim[0])
        iterations += 1

    x = sim[0]
    fval = numpy.min(fsim)
    warnflag = 0

    result = OptimizeResult(fun=fval, nit=iterations, nfev=0,
                            status=warnflag, success=(warnflag == 0),
                            message=None, x=x, final_simplex=(sim, fsim))
    return result
# Generated by Django 3.1.7 on 2021-03-25 00:33

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('restaurante', '0003_auto_20210324_1932'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='comprobantemodel',
            options={'verbose_name': 'Comprobante'},
        ),
        migrations.AlterModelTable(
            name='comprobantemodel',
            table='t_comprobante',
        ),
    ]
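# Hedged sketch (not part of the original sources): the model state implied by
# the migration above. The real fields of ComprobanteModel live elsewhere in
# the 'restaurante' app and are unknown here, so only the Meta options set by
# AlterModelOptions and AlterModelTable are shown.
from django.db import models


class ComprobanteModel(models.Model):
    # ... actual fields are defined in the app's models.py ...

    class Meta:
        verbose_name = 'Comprobante'
        db_table = 't_comprobante'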
import cv2
import numpy as np
from math import *


def appendimages(im1,im2):
    """ Return a new image that appends the two images side-by-side. """
    # select the image with the fewest rows and fill in enough empty rows
    rows1 = im1.shape[0]
    rows2 = im2.shape[0]
    if rows1 < rows2:
        im1 = np.concatenate((im1,np.zeros((rows2-rows1,im1.shape[1]))),axis=0)
    elif rows1 > rows2:
        im2 = np.concatenate((im2,np.zeros((rows1-rows2,im2.shape[1]))),axis=0)
    # if none of these cases, they are equal and no filling is needed.
    return np.concatenate((im1,im2), axis=1)


def append_imgs(im1, im2, im3):
    #buff = appendimages(im1,im2)
    #return appendimages(buff,im3)
    buff = np.concatenate((im1,im2), axis=1)
    return np.concatenate((buff,im3), axis=1)


#check whether the point is near the edge or not
def point_not_at_edge(x, y, img_height, img_width, threshold):
    no_at_edge = ( (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) )
    return no_at_edge


#check whether two points are too near to each other
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
    no_same_point = (fabs(x - x_neighb) + fabs(y - y_neighb) > 2*threshold)
    return no_same_point


def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
    no_same_point = (fabs(x - x_next) + fabs(y - y_next) > 2*threshold)
    no_at_edge = (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold )
    return (no_same_point and no_at_edge)


'''
calculate the point on the wrist of the hand
by taking the average of the opposites of the convexity defects relative to the center
'''
def find_wrist(center, contour, set_idx_convDefs):
    n = len(set_idx_convDefs)
    opposites = np.zeros((2,n))
    for i in range(n):
        opposites[0,i] = 2*center[0] - contour[set_idx_convDefs[i], 0, 0] #compute x
        opposites[1,i] = 2*center[1] - contour[set_idx_convDefs[i], 0, 1] #compute y

    total = np.sum(opposites, axis = 1)
    #print total
    x = int(total[0]/n)
    y = int(total[1]/n)
    wrist = (x, y)
    #print 'wrist = ', wrist
    return wrist


'''
simple method to detect finger tips
by finding the farthest points on the convex hull
from a fixed point. This fixed point can be the center or the wrist
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
    dist_from_fixedPoint = []
    img_height, img_width = img.shape[0:2]
    hull_nbPts = hull.shape[0]

    #calculate the distance to the fixed point
    for i in range(hull_nbPts):
        dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))

    #sort indices from farthest to nearest
    max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))

    #eliminate duplicate points and points at the edge;
    #results stored in idx_ok, the list of candidate hull indices
    idx_ok = []

    for i in range(hull_nbPts):
        idx = max_indx[i]
        if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
            if(len(idx_ok) == 0):
                idx_ok.append(idx)
            else:
                not_similar = True
                for idx_neighbor in idx_ok:
                    not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1], neighbor_thresh))
                    if not not_similar: #if similar, break the loop
                        break

                if(not_similar):
                    idx_ok.append(idx)
    return idx_ok


def simple_preprocessing(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    blur = cv2.erode(blur, kernel, iterations = 2)
    blur = cv2.dilate(blur, kernel, iterations = 2)

    ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image


def simple_preprocessing2(img, backGround):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
    gray = gray-gray2

    blur = cv2.GaussianBlur(gray, (5,5), 0)

    #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    #blur = cv2.erode(blur, kernel, iterations = 2)
    #blur = cv2.dilate(blur, kernel, iterations = 2)

    ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image


def hsv_preprocessing(img):
    #define boundaries of HSV pixel intensities to be considered as 'skin'
    #H: 2-39 / 360 * 255 = 1-28
    #S: 0.15 - 0.9 / 1 * 255 = 38-250
    #V: 0.2 - 0.95 / 1 * 255 = 51-242
    lower = np.array([1, 38, 51])
    upper = np.array([28, 250, 242])

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    #hsv = cv2.GaussianBlur(hsv, (5,5), 0)
    skinMask = cv2.inRange(hsv, lower, upper)

    #choose a structuring element to apply the noise-removal process
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    skinMask = cv2.erode(skinMask, kernel, iterations = 2)
    skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

    blur = cv2.GaussianBlur(skinMask, (5,5), 0)

    ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image


def find_contour_hull(binary_image):
    #find the contours
    contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    #search for the largest contour in the hierarchy tree of contours
    max_area = 0
    ci = 0
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if(area > max_area):
            max_area = area
            ci = i

    cnt = contours[ci]
    hull = cv2.convexHull(cnt)
    hull_idx = cv2.convexHull(cnt, returnPoints = False)

    return cnt, hull, hull_idx


def draws_contour_hull(img, cnt, hull):
    #draw an image with only the contour and its convex hull
    drawing = np.zeros(img.shape, np.uint8)
    cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
    cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
    return drawing


def eliminate_background(img, backGround, thres_diff):
    height, width, depth = img.shape
    for i in range(height):
        for j in range(width):
            erase = True
            for k in range(depth):
                if(fabs(img[i,j,k] - backGround[i,j,k]) > thres_diff):
                    erase = False
            if erase:
                img[i,j,:] = 0
    return img


'''
Tracking by camera
NOTE: hsv is very color and light sensitive, and simple_preprocessing seems more stable
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
    camera.read()
grabbed, backGround = camera.read()
for i in range(12):
    grabbed, img = camera.read()
    backGround = backGround/2 + img/2
'''
def tracking():
    camera = cv2.VideoCapture(0)
    _, img = camera.read()
    h, w, d = img.shape
    #out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))
    fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
    out = cv2.VideoWriter()
    success = out.open('output.avi', fourcc, 15, (3*w,h), True)

    waitTime = 100
    for i in range(waitTime):
        _, average = camera.read()
    #average = np.float32(average)

    index_im = 0
    while True:
        grabbed, img = camera.read()

        #alpha = 0.01 #forgetting factor
        #cv2.accumulateWeighted(img, average, alpha) #img is src, average is dst
        img_diff = cv2.absdiff(img, average) #absolute difference between the current frame and the reference frame
        #cv2.imshow('img_diff', img_diff)

        #subtract background
        #img = eliminate_background(img, backGround, 20)
        #bin_image = simple_preprocessing(img, backGround)
        bin_image = simple_preprocessing(img_diff)
        bin_image2 = bin_image.copy()
        cv2.imshow('binaire', bin_image2)

        # bin_image = hsv_preprocessing(img)
        # cv2.imshow('orig', img)
        # cv2.imshow('bin', bin_image)
        # cv2.waitKey(0)

        cnt, hull, hull_idx = find_contour_hull(bin_image)
        drawing = draws_contour_hull(img, cnt, hull)

        #search for the points between fingers by using convexity defects
        #see the OpenCV docs to understand the implementation details
        convDefs = cv2.convexityDefects(cnt, hull_idx)
        dist_order = np.argsort((-1)*convDefs[:,0,3])
        max4dist = dist_order[0:4]
        max4points = convDefs[max4dist,0,2]

        for i in max4points:
            cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)

        hull_nbPts = hull.shape[0]
        '''
        #draw all the points that constitute the convex hull (for debugging)
        for i in range(hull_nbPts):
            cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
            cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
        '''

        #find and draw the center of the contour
        moments = cv2.moments(cnt)
        if moments['m00'] != 0:
            cx = int(moments['m10']/moments['m00']) # cx = M10/M00
            cy = int(moments['m01']/moments['m00']) # cy = M01/M00
            centr = (cx, cy)
            cv2.circle(drawing, centr, 5, [0, 255, 255], 2)

            #find and draw the point that represents the wrist of the hand
            wrist = find_wrist(centr, cnt, max4points)
            cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)

            edge_thresh = 20
            neighbor_thresh = 20
            fixedPoint = wrist
            idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
            #print 'list of idx_ok = ', idx_ok
            max_5hull_idx = idx_ok[0:5]
            #print 'first five of idx_ok = ', max_5hull_idx
            for i in max_5hull_idx:
                cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
                #print hull[i]
                #print dist_from_center

        #cv2.imshow('contour and convex hull', drawing)

        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
        '''
        print img.shape
        print bin_image2.shape
        print drawing.shape
        '''
        frame = append_imgs(img, bin_image2, drawing)
        #cv2.imshow('frame', frame)
        #out.write(frame)
        cv2.imwrite("store2/" + "img" + str(index_im) + ".jpg", frame)
        index_im += 1

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    camera.release()
    out.release()
    #self.out = None
    cv2.destroyAllWindows()


def main():
    image_name = "hand_in_BG5.png"
    img = cv2.imread(image_name)

    bin_image = simple_preprocessing(img)
    #bin_image = hsv_preprocessing(img)
    cv2.imshow('orig', img)
    cv2.imshow('bin', bin_image)
    cv2.waitKey(0)

    cnt, hull, hull_idx = find_contour_hull(bin_image)
    drawing = draws_contour_hull(img, cnt, hull)

    #search for the points between fingers by using convexity defects
    #see the OpenCV docs to understand the implementation details
    convDefs = cv2.convexityDefects(cnt, hull_idx)
    dist_order = np.argsort((-1)*convDefs[:,0,3])
    max4dist = dist_order[0:4]
    max4points = convDefs[max4dist,0,2]

    for i in max4points:
        cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)

    hull_nbPts = hull.shape[0]
    '''
    #draw all the points that constitute the convex hull (for debugging)
    for i in range(hull_nbPts):
        cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
        cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
    '''

    #find and draw the center of the contour
    moments = cv2.moments(cnt)
    if moments['m00'] != 0:
        cx = int(moments['m10']/moments['m00']) # cx = M10/M00
        cy = int(moments['m01']/moments['m00']) # cy = M01/M00
        centr = (cx, cy)
        cv2.circle(drawing, centr, 5, [0, 255, 255], 2)

        #find and draw the point that represents the wrist of the hand
        wrist = find_wrist(centr, cnt, max4points)
        cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)

        edge_thresh = 20
        neighbor_thresh = 20
        fixedPoint = wrist
        idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
        #print 'list of idx_ok = ', idx_ok
        max_5hull_idx = idx_ok[0:1]
        #print 'first five of idx_ok = ', max_5hull_idx
        for i in max_5hull_idx:
            cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
            #print hull[i]
            #print dist_from_center

    cv2.imshow('contour and convex hull', drawing)
    k = cv2.waitKey(0)


if __name__ == "__main__":
    # main()
    tracking()
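# Hedged demo (not part of the original module): exercising find_wrist() on
# synthetic data, with no camera or image required. The contour points and
# defect indices below are made up; they only illustrate that the wrist
# estimate is the average of the convexity-defect points reflected through the
# center. Call _find_wrist_demo() manually to run it.

def _find_wrist_demo():
    center = (50, 50)
    # fake contour in OpenCV layout (N, 1, 2); the first three points stand in
    # for the deepest convexity defects (the valleys between the fingers)
    contour = np.array([[[60, 20]], [[50, 10]], [[40, 20]], [[50, 90]]])
    defect_idx = [0, 1, 2]
    # reflections through the center: (40, 80), (50, 90), (60, 80) -> (50, 83)
    print(find_wrist(center, contour, defect_idx))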
[hull], 0, (0, 0, 255), 3)\n return drawing\n\n\ndef eliminate_background(img, backGround, thres_diff):\n height, width, depth = img.shape\n for i in range(height):\n for j in range(width):\n erase = True\n for k in range(depth):\n if(fabs(img[i,j,k] - backGround[i,j,k]) > thres_diff):\n erase = False\n if erase:\n img[i,j,:] = 0\n return img\n'''\nTracking by camera\nNOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler\n'''\n\n \n'''\n firstSec = 0\n camera = cv2.VideoCapture(0)\n for i in range(12):\n camera.read()\n \n grabbed, backGround = camera.read()\n for i in range(12):\n grabbed, img = camera.read()\n backGround = backGround/2 + img/2\n'''\n\n\ndef tracking():\n camera = cv2.VideoCapture(0)\n _,img = camera.read()\n \n h,w,d = img.shape\n\n #out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))\n \n fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')\n out = cv2.VideoWriter()\n success = out.open('output.avi',fourcc, 15, (3*w,h), True)\n \n\n waitTime = 100\n for i in range(waitTime):\n _, average = camera.read()\n\n #average = np.float32(average)\n index_im = 0\n while True:\n\n grabbed, img = camera.read()\n #alpha = 0.01 #factor of forgetting\n #cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst\n\n img_diff = cv2.absdiff(img, average)#convert scale and do subtract these 2 images\n #cv2.imshow('img_diff', img_diff)\n \n #substract background\n #img = eliminate_background(img, backGround, 20)\n\n #bin_image = simple_preprocessing(img, backGround)\n bin_image = simple_preprocessing(img_diff)\n bin_image2 = bin_image.copy()\n cv2.imshow('binaire', bin_image2)\n # bin_image = hsv_preprocessing(img)\n\n # cv2.imshow('orig', img)\n # cv2.imshow('bin', bin_image)\n # cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n\n\n #search the points between each finger by using convexity defects\n #see the doc of opencv to understand implementation details\n\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort((-1)*convDefs[:,0,3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist,0,2]\n\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2) \n \n hull_nbPts = hull.shape[0]\n\n '''\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n '''\n\n #find and draw center of contour\n moments = cv2.moments(cnt)\n if moments['m00']!=0:\n cx = int(moments['m10']/moments['m00']) # cx = M10/M00\n cy = int(moments['m01']/moments['m00']) # cy = M01/M00\n \n centr=(cx,cy) \n cv2.circle(drawing, centr, 5, [0, 255, 255], 2) \n\n #find and draw point represents the wrist of the hand\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2) \n\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)\n\n #print 'list of idx_ok = ', idx_ok\n max_5hull_idx = idx_ok[0:5]\n #print 'first five of idx_ok = ', max_5hull_idx\n\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)\n #print hull[i]\n\n #print dist_from_center\n #cv2.imshow('contour and convex hull', drawing)\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)\n '''\n print img.shape\n 
print bin_image2.shape\n print drawing.shape\n '''\n \n frame = append_imgs(img, bin_image2, drawing)\n \n #cv2.imshow('frame', frame)\n #out.write(frame)\n cv2.imwrite(\"store2/\" + \"img\"+str(index_im) + \".jpg\", frame)\n index_im += 1\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n \n camera.release()\n out.release()\n #self.out = None\n cv2.destroyAllWindows()\n\ndef main():\n\n image_name = \"hand_in_BG5.png\"\n\n img = cv2.imread(image_name)\n\n bin_image = simple_preprocessing(img)\n #bin_image = hsv_preprocessing(img)\n\n cv2.imshow('orig', img)\n cv2.imshow('bin', bin_image)\n cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n\n\n #search the points between each finger by using convexity defects\n #see the doc of opencv to understand implementation details\n\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort((-1)*convDefs[:,0,3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist,0,2]\n\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2) \n \n hull_nbPts = hull.shape[0]\n\n '''\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n '''\n\n #find and draw center of contour\n moments = cv2.moments(cnt)\n if moments['m00']!=0:\n cx = int(moments['m10']/moments['m00']) # cx = M10/M00\n cy = int(moments['m01']/moments['m00']) # cy = M01/M00\n \n centr=(cx,cy) \n cv2.circle(drawing, centr, 5, [0, 255, 255], 2) \n\n #find and draw point represents the wrist of the hand\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2) \n\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)\n\n #print 'list of idx_ok = ', idx_ok\n max_5hull_idx = idx_ok[0:1]\n #print 'first five of idx_ok = ', max_5hull_idx\n\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)\n #print hull[i]\n\n #print dist_from_center\n cv2.imshow('contour and convex hull', drawing)\n k = cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n # main()\n tracking()\n", "step-ids": [ 9, 11, 12, 15, 18 ] }
[ 9, 11, 12, 15, 18 ]
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-03-15 15:20 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('challenges', '0019_auto_20170310_1114'), ] operations = [ migrations.AddField( model_name='challenge', name='supported_languages', field=models.ManyToManyField(to='challenges.Language'), ), ]
normal
{ "blob_id": "6b7ff00eb9a5d0837def5b245ba2d4a0acec972e", "index": 3466, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('challenges', '0019_auto_20170310_1114')]\n operations = [migrations.AddField(model_name='challenge', name=\n 'supported_languages', field=models.ManyToManyField(to=\n 'challenges.Language'))]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('challenges', '0019_auto_20170310_1114')]\n operations = [migrations.AddField(model_name='challenge', name=\n 'supported_languages', field=models.ManyToManyField(to=\n 'challenges.Language'))]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-03-15 15:20\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('challenges', '0019_auto_20170310_1114'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='challenge',\n name='supported_languages',\n field=models.ManyToManyField(to='challenges.Language'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import unittest
from HTMLTestRunner import HTMLTestRunner

discover = unittest.defaultTestLoader.discover(start_dir='./',
                                               pattern='test*.py',
                                               top_level_dir=None)
f = open('report.html', 'wb+')
runner = HTMLTestRunner(stream=f,
                        title="Web automation",
                        description="Automated test report details")
runner.run(discover)
f.close()
normal
{ "blob_id": "051062a78d3f8b0caefd15f7a57a8500ddc019a6", "index": 9290, "step-1": "<mask token>\n", "step-2": "<mask token>\nrunner.run(discover)\nf.close()\n", "step-3": "<mask token>\ndiscover = unittest.defaultTestLoader.discover(start_dir='./', pattern=\n 'test*.py', top_level_dir=None)\nf = open('report.html', 'wb+')\nrunner = HTMLTestRunner(stream=f, title='web自动化', description='自动化测试报告详情')\nrunner.run(discover)\nf.close()\n", "step-4": "import unittest\nfrom HTMLTestRunner import HTMLTestRunner\ndiscover = unittest.defaultTestLoader.discover(start_dir='./', pattern=\n 'test*.py', top_level_dir=None)\nf = open('report.html', 'wb+')\nrunner = HTMLTestRunner(stream=f, title='web自动化', description='自动化测试报告详情')\nrunner.run(discover)\nf.close()\n", "step-5": "import unittest\nfrom HTMLTestRunner import HTMLTestRunner\n\ndiscover = unittest.defaultTestLoader.discover(start_dir='./',\n pattern='test*.py',\n top_level_dir=None)\nf = open('report.html', 'wb+')\nrunner = HTMLTestRunner(stream=f,\n title=\"web自动化\",\n description=\"自动化测试报告详情\")\nrunner.run(discover)\nf.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""Settings module for test app.""" ENV = "development" TESTING = True SQLALCHEMY_DATABASE_URI = "sqlite://" SECRET_KEY = "not-so-secret-in-tests" DEBUG_TB_ENABLED = False SQLALCHEMY_TRACK_MODIFICATIONS = False APP_ENV = "testing" JWT_SECRET_KEY = ( "-----BEGIN RSA PRIVATE KEY-----\n" "MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJi" "bXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a0" "3GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4i" "c7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXB" "wkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXk" "Xs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY" "4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDt" "jEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2X" "IpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWi" "z+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT" "/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxr" "E9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==" "\n-----END RSA PRIVATE KEY-----" ) JWT_PUBLIC_KEY = ( "-----BEGIN PUBLIC KEY-----\n" "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9" "iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+M" "uSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRg" "EKwIDAQAB" "\n-----END PUBLIC KEY-----" )
normal
{ "blob_id": "909ea7b9335a858662f83abc71b4d58578bd0850", "index": 8261, "step-1": "<mask token>\n", "step-2": "<mask token>\nENV = 'development'\nTESTING = True\nSQLALCHEMY_DATABASE_URI = 'sqlite://'\nSECRET_KEY = 'not-so-secret-in-tests'\nDEBUG_TB_ENABLED = False\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nAPP_ENV = 'testing'\nJWT_SECRET_KEY = \"\"\"-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4ic7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXBwkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXkXs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDtjEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2XIpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWiz+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxrE9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==\n-----END RSA PRIVATE KEY-----\"\"\"\nJWT_PUBLIC_KEY = \"\"\"-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB\n-----END PUBLIC KEY-----\"\"\"\n", "step-3": "\"\"\"Settings module for test app.\"\"\"\nENV = \"development\"\nTESTING = True\nSQLALCHEMY_DATABASE_URI = \"sqlite://\"\nSECRET_KEY = \"not-so-secret-in-tests\"\nDEBUG_TB_ENABLED = False\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\nAPP_ENV = \"testing\"\n\n\nJWT_SECRET_KEY = (\n \"-----BEGIN RSA PRIVATE KEY-----\\n\"\n \"MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJi\"\n \"bXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a0\"\n \"3GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4i\"\n \"c7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXB\"\n \"wkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXk\"\n \"Xs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY\"\n \"4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDt\"\n \"jEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2X\"\n \"IpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWi\"\n \"z+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT\"\n \"/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxr\"\n \"E9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==\"\n \"\\n-----END RSA PRIVATE KEY-----\"\n)\n\nJWT_PUBLIC_KEY = (\n \"-----BEGIN PUBLIC KEY-----\\n\"\n \"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9\"\n \"iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+M\"\n \"uSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRg\"\n \"EKwIDAQAB\"\n \"\\n-----END PUBLIC KEY-----\"\n)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django import forms from django.core import validators class NameSearch(forms.Form): name = forms.CharField(label='Search By Name')
normal
{ "blob_id": "7620ff333422d0354cc41c2a66444c3e8a0c011f", "index": 1606, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass NameSearch(forms.Form):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass NameSearch(forms.Form):\n name = forms.CharField(label='Search By Name')\n", "step-4": "from django import forms\nfrom django.core import validators\n\n\nclass NameSearch(forms.Form):\n name = forms.CharField(label='Search By Name')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
""" Auxiliary functions for calculating the utility of achieving a certain data rate (for a UE). Attention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)! """ import numpy as np from deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY def linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY): """ Utility that directly equals the data rate, increasing linearly up to a given maximum. :param max_dr: Maximum data rate at which the utility does not increase further :return: Utility """ assert curr_dr >= 0 and max_dr >= 0 assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, \ "The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!" return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY) def step_utility(curr_dr, req_dr): """ Flat negative utility as long as the required data rate is not met; then positive. Nothing in between. :param curr_dr: Current data rate :param req_dr: Required data rate :return: Min or max utility depending on whether the required data rate is met """ if curr_dr >= req_dr: return MAX_UTILITY return MIN_UTILITY def log_utility(curr_dr): """ More data rate increases the utility following a log function: High initial increase, then flattens. :param curr_dr: Current data rate :param factor: Factor to multiply the log function with :param add: Add to current data rate before passing to log function :return: Utility """ # 4*log(0.1+x) looks good: around -10 for no dr; 0 for 0.9 dr; slightly positive for more # 10*log10(0.1+x) is even better because it's steeper, is exactly -10 for dr=0, and flatter for larger dr # with many UEs where each UE only gets around 0.1 data rate, 100*log(0.9+x) looks good (eg, 50 UEs on medium env) # better: 10*log10(x) --> clip to [-20, 20]; -20 for <= 0.01 dr; +20 for >= 100 dr # ensure min/max utility are set correctly for this utility function assert MIN_UTILITY == -20 and MAX_UTILITY == 20, "The chosen log utility requires min/max utility to be -20/+20" if curr_dr == 0: return MIN_UTILITY return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)
normal
{ "blob_id": "e3de072d6bce2ecc105306c06b9a9aa0362130ff", "index": 6234, "step-1": "<mask token>\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n", "step-2": "<mask token>\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n", "step-3": "<mask token>\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. 
Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n", "step-4": "<mask token>\nimport numpy as np\nfrom deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n", "step-5": "\"\"\"\nAuxiliary functions for calculating the utility of achieving a certain data rate (for a UE).\nAttention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)!\n\"\"\"\nimport numpy as np\n\nfrom deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, \\\n \"The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!\"\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. 
Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n # 4*log(0.1+x) looks good: around -10 for no dr; 0 for 0.9 dr; slightly positive for more\n # 10*log10(0.1+x) is even better because it's steeper, is exactly -10 for dr=0, and flatter for larger dr\n # with many UEs where each UE only gets around 0.1 data rate, 100*log(0.9+x) looks good (eg, 50 UEs on medium env)\n\n # better: 10*log10(x) --> clip to [-20, 20]; -20 for <= 0.01 dr; +20 for >= 100 dr\n # ensure min/max utility are set correctly for this utility function\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, \"The chosen log utility requires min/max utility to be -20/+20\"\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
    speech.say('I am a DALEK - EXTERMINATE', speed=120, pitch=100, throat=
        100, mouth=200)
<|reserved_special_token_1|>
from microbit import *
import speech
while True:
    speech.say('I am a DALEK - EXTERMINATE', speed=120, pitch=100, throat=
        100, mouth=200)
<|reserved_special_token_1|>
from microbit import *

import speech


while True:
    speech.say("I am a DALEK - EXTERMINATE", speed=120, pitch=100, throat=100, mouth=200) #try changing the parameters
flexible
{ "blob_id": "dad78d7948fb1038f9cf66732f39c18a18f2a3c8", "index": 5233, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile True:\n speech.say('I am a DALEK - EXTERMINATE', speed=120, pitch=100, throat=\n 100, mouth=200)\n", "step-3": "from microbit import *\nimport speech\nwhile True:\n speech.say('I am a DALEK - EXTERMINATE', speed=120, pitch=100, throat=\n 100, mouth=200)\n", "step-4": "from microbit import *\n\nimport speech\n\n\nwhile True:\n speech.say(\"I am a DALEK - EXTERMINATE\", speed=120, pitch=100, throat=100, mouth=200) #kokeile muuttaa parametrejä\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> urlpatterns = [path('admin/', admin.site.urls), path('', views.index, name= 'index')] <|reserved_special_token_1|> from django.contrib import admin from django.urls import path from django.conf.urls import url from . import views urlpatterns = [path('admin/', admin.site.urls), path('', views.index, name= 'index')] <|reserved_special_token_1|> from django.contrib import admin from django.urls import path from django.conf.urls import url from . import views urlpatterns = [ path('admin/', admin.site.urls), path(r'', views.index, name='index'), ]
flexible
{ "blob_id": "b0fad3847519bb18365a8cd4226d06e9d96a8308", "index": 1258, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('', views.index, name=\n 'index')]\n", "step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom . import views\nurlpatterns = [path('admin/', admin.site.urls), path('', views.index, name=\n 'index')]\n", "step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom . import views\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(r'', views.index, name='index'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class FizzBuzzTest(unittest.TestCase): def check_fizz_buzz(self, value, expected): result = fizz_buzz(value) self.assertEqual(expected, result) <|reserved_special_token_0|> def test_fizz_buzz__fizz_buzz_2_2(self): self.check_fizz_buzz(2, '2') def test_fizz_buzz__fizz_buzz_3_Fizz(self): self.check_fizz_buzz(3, 'Fizz') def test_fizz_buzz__fizz_buzz_5_Buzz(self): self.check_fizz_buzz(5, 'Buzz') <|reserved_special_token_0|> def test_fizz_buzz__fizz_buzz_10_Buzz(self): self.check_fizz_buzz(10, 'Buzz') def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self): self.check_fizz_buzz(15, 'FizzBuzz') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def fizz_buzz(value): if is_multiple(value, 5) and is_multiple(value, 3): return 'FizzBuzz' if is_multiple(value, 3): return 'Fizz' if is_multiple(value, 5): return 'Buzz' return str(value) class FizzBuzzTest(unittest.TestCase): def check_fizz_buzz(self, value, expected): result = fizz_buzz(value) self.assertEqual(expected, result) def test_fizz_buzz__fizz_buzz_1_1(self): self.check_fizz_buzz(1, '1') def test_fizz_buzz__fizz_buzz_2_2(self): self.check_fizz_buzz(2, '2') def test_fizz_buzz__fizz_buzz_3_Fizz(self): self.check_fizz_buzz(3, 'Fizz') def test_fizz_buzz__fizz_buzz_5_Buzz(self): self.check_fizz_buzz(5, 'Buzz') def test_fizz_buzz__fizz_buzz_6_Fizz(self): self.check_fizz_buzz(6, 'Fizz') def test_fizz_buzz__fizz_buzz_10_Buzz(self): self.check_fizz_buzz(10, 'Buzz') def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self): self.check_fizz_buzz(15, 'FizzBuzz') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def is_multiple(value, base): return 0 == value % base def fizz_buzz(value): if is_multiple(value, 5) and is_multiple(value, 3): return 'FizzBuzz' if is_multiple(value, 3): return 'Fizz' if is_multiple(value, 5): return 'Buzz' return str(value) class FizzBuzzTest(unittest.TestCase): def check_fizz_buzz(self, value, expected): result = fizz_buzz(value) self.assertEqual(expected, result) def test_fizz_buzz__fizz_buzz_1_1(self): self.check_fizz_buzz(1, '1') def test_fizz_buzz__fizz_buzz_2_2(self): self.check_fizz_buzz(2, '2') def test_fizz_buzz__fizz_buzz_3_Fizz(self): self.check_fizz_buzz(3, 'Fizz') def test_fizz_buzz__fizz_buzz_5_Buzz(self): self.check_fizz_buzz(5, 'Buzz') def test_fizz_buzz__fizz_buzz_6_Fizz(self): self.check_fizz_buzz(6, 'Fizz') def test_fizz_buzz__fizz_buzz_10_Buzz(self): self.check_fizz_buzz(10, 'Buzz') def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self): self.check_fizz_buzz(15, 'FizzBuzz') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def is_multiple(value, base): return 0 == value % base def fizz_buzz(value): if is_multiple(value, 5) and is_multiple(value, 3): return 'FizzBuzz' if is_multiple(value, 3): return 'Fizz' if is_multiple(value, 5): return 'Buzz' return str(value) class FizzBuzzTest(unittest.TestCase): def check_fizz_buzz(self, value, expected): result = fizz_buzz(value) self.assertEqual(expected, result) def test_fizz_buzz__fizz_buzz_1_1(self): self.check_fizz_buzz(1, '1') def test_fizz_buzz__fizz_buzz_2_2(self): self.check_fizz_buzz(2, '2') def test_fizz_buzz__fizz_buzz_3_Fizz(self): self.check_fizz_buzz(3, 'Fizz') def test_fizz_buzz__fizz_buzz_5_Buzz(self): self.check_fizz_buzz(5, 'Buzz') def test_fizz_buzz__fizz_buzz_6_Fizz(self): self.check_fizz_buzz(6, 'Fizz') def test_fizz_buzz__fizz_buzz_10_Buzz(self): self.check_fizz_buzz(10, 'Buzz') def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self): 
self.check_fizz_buzz(15, 'FizzBuzz') if __name__ == '__main__': print('Running all unit tests...') unittest.main() <|reserved_special_token_1|> import unittest def is_multiple(value, base): return 0 == (value % base) def fizz_buzz(value): if is_multiple(value, 5) and is_multiple(value, 3): return "FizzBuzz" if is_multiple(value, 3): return "Fizz" if is_multiple(value, 5): return "Buzz" return str(value) class FizzBuzzTest(unittest.TestCase): def check_fizz_buzz(self, value, expected): result = fizz_buzz(value) self.assertEqual(expected, result) def test_fizz_buzz__fizz_buzz_1_1(self): self.check_fizz_buzz(1, "1") def test_fizz_buzz__fizz_buzz_2_2(self): self.check_fizz_buzz(2, "2") def test_fizz_buzz__fizz_buzz_3_Fizz(self): self.check_fizz_buzz(3, "Fizz") def test_fizz_buzz__fizz_buzz_5_Buzz(self): self.check_fizz_buzz(5, "Buzz") def test_fizz_buzz__fizz_buzz_6_Fizz(self): self.check_fizz_buzz(6, "Fizz") def test_fizz_buzz__fizz_buzz_10_Buzz(self): self.check_fizz_buzz(10, "Buzz") def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self): self.check_fizz_buzz(15, "FizzBuzz") if __name__ == "__main__": print("Running all unit tests...") unittest.main()
flexible
{ "blob_id": "59d543ed443c156ac65f9c806ba5bada6bcd0c21", "index": 6891, "step-1": "<mask token>\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n <mask token>\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n <mask token>\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return 'FizzBuzz'\n if is_multiple(value, 3):\n return 'Fizz'\n if is_multiple(value, 5):\n return 'Buzz'\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, '1')\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef is_multiple(value, base):\n return 0 == value % base\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return 'FizzBuzz'\n if is_multiple(value, 3):\n return 'Fizz'\n if is_multiple(value, 5):\n return 'Buzz'\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, '1')\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef is_multiple(value, base):\n return 0 == value % base\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return 'FizzBuzz'\n if is_multiple(value, 3):\n return 'Fizz'\n if is_multiple(value, 5):\n return 'Buzz'\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, '1')\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, '2')\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, 'Buzz')\n\n def 
test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, 'Fizz')\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, 'Buzz')\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, 'FizzBuzz')\n\n\nif __name__ == '__main__':\n print('Running all unit tests...')\n unittest.main()\n", "step-5": "import unittest\n\n\ndef is_multiple(value, base):\n return 0 == (value % base)\n\n\ndef fizz_buzz(value):\n if is_multiple(value, 5) and is_multiple(value, 3):\n return \"FizzBuzz\"\n if is_multiple(value, 3):\n return \"Fizz\"\n if is_multiple(value, 5):\n return \"Buzz\"\n return str(value)\n\n\nclass FizzBuzzTest(unittest.TestCase):\n def check_fizz_buzz(self, value, expected):\n result = fizz_buzz(value)\n\n self.assertEqual(expected, result)\n\n def test_fizz_buzz__fizz_buzz_1_1(self):\n self.check_fizz_buzz(1, \"1\")\n\n def test_fizz_buzz__fizz_buzz_2_2(self):\n self.check_fizz_buzz(2, \"2\")\n\n def test_fizz_buzz__fizz_buzz_3_Fizz(self):\n self.check_fizz_buzz(3, \"Fizz\")\n\n def test_fizz_buzz__fizz_buzz_5_Buzz(self):\n self.check_fizz_buzz(5, \"Buzz\")\n\n def test_fizz_buzz__fizz_buzz_6_Fizz(self):\n self.check_fizz_buzz(6, \"Fizz\")\n\n def test_fizz_buzz__fizz_buzz_10_Buzz(self):\n self.check_fizz_buzz(10, \"Buzz\")\n\n def test_fizz_buzz__fizz_buzz_15_FizzBuzz(self):\n self.check_fizz_buzz(15, \"FizzBuzz\")\n\n\nif __name__ == \"__main__\":\n print(\"Running all unit tests...\")\n unittest.main()\n", "step-ids": [ 7, 10, 11, 12, 14 ] }
[ 7, 10, 11, 12, 14 ]
#coding=utf-8
import yaml
import os
import os.path
import shutil
import json
import subprocess
import sys
sys.path.append(os.path.split(os.path.realpath(__file__))[0])
import rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner
import rtool.utils as utils

logger = utils.getLogger('CopyRes')

def run():
	logger.debug("CopyRes")
	pass

def run_with_configs(configs,tp=None):
	logger.debug("Executing NCopyRes")
	apaction = CopyResAction()
	apaction.go(configs)
	pass

def safeRemoveDir(dir_path):
	if os.path.exists(dir_path):
		shutil.rmtree(dir_path)
	pass

def clean_output(configs):
	default_output_path = configs["output-root"]
	safeRemoveDir(default_output_path)
	pass

class CopyResAction:
	"""Copy resources directly to the target directory according to the resource config file."""

	default_option = None

	res_root = None
	packing_root = None
	ignore_list=[]

	def setResRoot(self,root):
		self.res_root = root
		pass
	def setPackingRoot(self,root):
		self.packing_root = root
		pass
	def setDefaultOption(self,option):
		self.default_option = option
		pass

	def go(self,config):

		ext_list = []
		input_list = config['input']
		if not config['options']['cpall']:
			if 'cpextlist' in config['options']:
				ext_list = config['options']['cpextlist'].split(',')
				for input_file_path in input_list:
					basedir,filename = os.path.split(input_file_path)
					name,fext = os.path.splitext(filename)
					for ext in ext_list:
						if ext == fext:
							# Preserve the directory hierarchy relative to the config item's root directory
							input_file_dir = os.path.dirname(input_file_path)
							dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
							dest_dir = config['output-root']
							# d_dir = config['output']
							if 'dst' in config['options']:
								d_dir = config['options']['dst']
								dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
							if not os.path.exists(dest_dir):
								os.makedirs(dest_dir)
							logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
							shutil.copy2(input_file_path,dest_dir)
			if 'filenames' in config['options']:
				filenames_list = config['options']['filenames'].split(',')
				for filename in filenames_list:
					for input_file_path in input_list:
						dirname,input_file_name = os.path.split(input_file_path)
						if filename==input_file_name:
							# Preserve the directory hierarchy relative to the config item's root directory
							input_file_dir = os.path.dirname(input_file_path)
							dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
							dest_dir = config['output-root']
							# d_dir = config['output']
							if 'dst' in config['options']:
								d_dir = config['options']['dst']
								dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
							if not os.path.exists(dest_dir):
								os.makedirs(dest_dir)
							logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
							shutil.copy2(input_file_path,dest_dir)
		else:
			for input_file_path in input_list:
				# Preserve the directory hierarchy relative to the config item's root directory
				input_file_dir = os.path.dirname(input_file_path)
				dest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))
				dest_dir = config['output-root']
				# d_dir = config['output']
				if 'dst' in config['options']:
					d_dir = config['options']['dst']
					dest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))
				if not os.path.exists(dest_dir):
					os.makedirs(dest_dir)
				logger.debug("[CopyRes]copy "+input_file_path+" to "+dest_dir)
				shutil.copy2(input_file_path,dest_dir)
			pass
		pass
normal
{ "blob_id": "364150d6f37329c43bead0d18da90f0f6ce9cd1b", "index": 4886, "step-1": "<mask token>\n\n\nclass CopyResAction:\n <mask token>\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n", "step-2": "<mask token>\n\n\ndef run():\n logger.debug('CopyRes')\n pass\n\n\n<mask token>\n\n\nclass CopyResAction:\n \"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = 
os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n", "step-3": "<mask token>\nsys.path.append(os.path.split(os.path.realpath(__file__))[0])\n<mask token>\nlogger = utils.getLogger('CopyRes')\n\n\ndef run():\n logger.debug('CopyRes')\n pass\n\n\ndef run_with_configs(configs, tp=None):\n logger.debug('Executing NCopyRes')\n apaction = CopyResAction()\n apaction.go(configs)\n pass\n\n\ndef safeRemoveDir(dir_path):\n if os.path.exists(dir_path):\n shutil.rmtree(dir_path)\n pass\n\n\ndef clean_output(configs):\n default_output_path = configs['output-root']\n safeRemoveDir(default_output_path)\n pass\n\n\nclass CopyResAction:\n \"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in 
config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n", "step-4": "import yaml\nimport os\nimport os.path\nimport shutil\nimport json\nimport subprocess\nimport sys\nsys.path.append(os.path.split(os.path.realpath(__file__))[0])\nimport rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner\nimport rtool.utils as utils\nlogger = utils.getLogger('CopyRes')\n\n\ndef run():\n logger.debug('CopyRes')\n pass\n\n\ndef run_with_configs(configs, tp=None):\n logger.debug('Executing NCopyRes')\n apaction = CopyResAction()\n apaction.go(configs)\n pass\n\n\ndef safeRemoveDir(dir_path):\n if os.path.exists(dir_path):\n shutil.rmtree(dir_path)\n pass\n\n\ndef clean_output(configs):\n default_output_path = configs['output-root']\n safeRemoveDir(default_output_path)\n pass\n\n\nclass CopyResAction:\n \"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n default_option = None\n res_root = None\n packing_root = None\n ignore_list = []\n\n def setResRoot(self, root):\n self.res_root = root\n pass\n\n def setPackingRoot(self, root):\n self.packing_root = root\n pass\n\n def setDefaultOption(self, option):\n self.default_option = option\n pass\n\n def go(self, config):\n ext_list = []\n input_list = config['input']\n if not config['options']['cpall']:\n if 'cpextlist' in config['options']:\n ext_list = config['options']['cpextlist'].split(',')\n for input_file_path in input_list:\n basedir, filename = os.path.split(input_file_path)\n name, fext = os.path.splitext(filename)\n for ext in ext_list:\n if ext == fext:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, 
os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n if 'filenames' in config['options']:\n filenames_list = config['options']['filenames'].split(',')\n for filename in filenames_list:\n for input_file_path in input_list:\n dirname, input_file_name = os.path.split(\n input_file_path)\n if filename == input_file_name:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'],\n os.path.relpath(input_file_dir, config[\n 'config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'\n ], d_dir, os.path.relpath(\n input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path +\n ' to ' + dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n else:\n for input_file_path in input_list:\n input_file_dir = os.path.dirname(input_file_path)\n dest_dir = os.path.join(config['outputroot'], os.path.\n relpath(input_file_dir, config['config-root']))\n dest_dir = config['output-root']\n if 'dst' in config['options']:\n d_dir = config['options']['dst']\n dest_dir = os.path.join(config['outputroot'], d_dir, os\n .path.relpath(input_file_dir, config['config-root']))\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n logger.debug('[CopyRes]copy ' + input_file_path + ' to ' +\n dest_dir)\n shutil.copy2(input_file_path, dest_dir)\n pass\n pass\n", "step-5": "#coding=utf-8\nimport yaml\nimport os\nimport os.path\nimport shutil\nimport json\nimport subprocess\nimport sys\nsys.path.append(os.path.split(os.path.realpath(__file__))[0])\nimport rtool.taskplugin.plugin.MultiProcessRunner as MultiProcessRunner\nimport rtool.utils as utils\n\nlogger = utils.getLogger('CopyRes')\n\ndef run():\n\tlogger.debug(\"CopyRes\")\n\tpass\n\ndef run_with_configs(configs,tp=None):\n\tlogger.debug(\"Executing NCopyRes\")\n\tapaction = CopyResAction()\n\tapaction.go(configs)\n\tpass\n\ndef safeRemoveDir(dir_path):\n\tif os.path.exists(dir_path):\n\t\tshutil.rmtree(dir_path)\n\tpass\n\ndef clean_output(configs):\n\tdefault_output_path = configs[\"output-root\"]\n\tsafeRemoveDir(default_output_path)\n\tpass\n\nclass CopyResAction:\n\t\"\"\"根据资源配置文件直接复制资源到目标目录\"\"\"\n\t\n\tdefault_option = None\n\n\tres_root = None\n\tpacking_root = None\n\tignore_list=[]\n\n\tdef setResRoot(self,root):\n\t\tself.res_root = root\n\t\tpass\n\tdef setPackingRoot(self,root):\n\t\tself.packing_root = root\n\t\tpass\n\tdef setDefaultOption(self,option):\n\t\tself.default_option = option\n\t\tpass\n\n\tdef go(self,config):\n\n\t\text_list = []\n\t\tinput_list = config['input']\n\t\tif not config['options']['cpall']:\n\t\t\tif 'cpextlist' in config['options']:\n\t\t\t\text_list = config['options']['cpextlist'].split(',')\n\t\t\t\tfor input_file_path in input_list:\n\t\t\t\t\tbasedir,filename = os.path.split(input_file_path)\n\t\t\t\t\tname,fext = os.path.splitext(filename)\n\t\t\t\t\tfor ext in ext_list:\t\t\t\t\t\t\n\t\t\t\t\t\tif ext == fext:\n\t\t\t\t\t\t\t# 保留目录结构的为相对于配置项根目录的层级\n\t\t\t\t\t\t\tinput_file_dir = os.path.dirname(input_file_path)\n\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tdest_dir = config['output-root']\n\t\t\t\t\t\t\t# d_dir = 
config['output']\n\t\t\t\t\t\t\tif 'dst' in config['options']:\n\t\t\t\t\t\t\t\td_dir = config['options']['dst']\n\t\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tif not os.path.exists(dest_dir):\n\t\t\t\t\t\t\t\tos.makedirs(dest_dir)\n\t\t\t\t\t\t\tlogger.debug(\"[CopyRes]copy \"+input_file_path+\" to \"+dest_dir)\n\t\t\t\t\t\t\tshutil.copy2(input_file_path,dest_dir)\n\t\t\tif 'filenames' in config['options']:\n\t\t\t\tfilenames_list = config['options']['filenames'].split(',')\n\t\t\t\tfor filename in filenames_list:\n\t\t\t\t\tfor input_file_path in input_list:\n\t\t\t\t\t\tdirname,input_file_name = os.path.split(input_file_path)\n\t\t\t\t\t\tif filename==input_file_name:\n\t\t\t\t\t\t\t# 保留目录结构的为相对于配置项根目录的层级\n\t\t\t\t\t\t\tinput_file_dir = os.path.dirname(input_file_path)\n\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tdest_dir = config['output-root']\n\t\t\t\t\t\t\t# d_dir = config['output']\n\t\t\t\t\t\t\tif 'dst' in config['options']:\n\t\t\t\t\t\t\t\td_dir = config['options']['dst']\n\t\t\t\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\t\t\t\tif not os.path.exists(dest_dir):\n\t\t\t\t\t\t\t\tos.makedirs(dest_dir)\n\t\t\t\t\t\t\tlogger.debug(\"[CopyRes]copy \"+input_file_path+\" to \"+dest_dir)\n\t\t\t\t\t\t\tshutil.copy2(input_file_path,dest_dir)\n\t\telse:\n\t\t\tfor input_file_path in input_list:\n\t\t\t\t# 保留目录结构的为相对于配置项根目录的层级\n\t\t\t\tinput_file_dir = os.path.dirname(input_file_path)\n\t\t\t\tdest_dir = os.path.join(config['outputroot'],os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\tdest_dir = config['output-root']\n\t\t\t\t# d_dir = config['output']\n\t\t\t\tif 'dst' in config['options']:\n\t\t\t\t\td_dir = config['options']['dst']\n\t\t\t\t\tdest_dir = os.path.join(config['outputroot'],d_dir,os.path.relpath(input_file_dir,config['config-root']))\n\t\t\t\tif not os.path.exists(dest_dir):\n\t\t\t\t\tos.makedirs(dest_dir)\n\t\t\t\tlogger.debug(\"[CopyRes]copy \"+input_file_path+\" to \"+dest_dir)\n\t\t\t\tshutil.copy2(input_file_path,dest_dir)\n\t\t\tpass\n\t\tpass", "step-ids": [ 6, 8, 13, 14, 15 ] }
[ 6, 8, 13, 14, 15 ]
# Lists
lista=[]
print(lista)

# Weekday list: first element
listasemana=["Lunes","Martes","Miercoles","Jueves","Viernes"]
print(listasemana[0])

# Weekday list: last element
listasemana=["Lunes","Martes","Miercoles","Jueves","Viernes"]
print(listasemana[-1])

# Weekday list: slice with the first three days
listasemana=["Lunes","Martes","Miercoles","Jueves","Viernes"]
print(listasemana[0:3])

# Remove the repeated elements from a list
listaa=[1,2,3,4,"hola",2,2]
conjunto=set(listaa)
listaa=list(conjunto)
print(listaa)

# Words from two lists
lista1palabras=["Sofia","Karla","Verinica","Lina","Natalia","Estefania"]
lista2palabras=["Enrique","Erica","Sofia","Lina","Carlos","Pablo"]
print(lista1palabras,lista2palabras)

# Words that appear only in the first list
lista1palabras=["Sofia","Karla","Verinica","Lina","Natalia","Estefania"]
lista2palabras=["Enrique","Erica","Sofia","Lina","Carlos","Pablo"]
print([palabra for palabra in lista1palabras if palabra not in lista2palabras])

# Words that appear only in the second list
lista1palabras=["Sofia","Karla","Verinica","Lina","Natalia","Estefania"]
lista2palabras=["Enrique","Erica","Sofia","Lina","Carlos","Pablo"]
print([palabra for palabra in lista2palabras if palabra not in lista1palabras])

# Words repeated in both lists
lista1palabras=["Sofia","Karla","Verinica","Lina","Natalia","Estefania"]
lista2palabras=["Enrique","Erica","Sofia","Lina","Carlos","Pablo"]
print([palabra for palabra in lista1palabras if palabra in lista2palabras])
normal
{ "blob_id": "37b23dc520abc7cbb6798f41063696916065626f", "index": 2203, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(lista)\n<mask token>\nprint(listasemana[0])\n<mask token>\nprint(listasemana[-1])\n<mask token>\nprint(listasemana[0, 3])\n<mask token>\nprint(conjunto)\n<mask token>\nprint(lista1palabras, lista2palabras)\n<mask token>\nprint(lista1palabras[1, 2, 4, 5], lista2palabras)\n<mask token>\nprint(lista2palabras[0, 1, 4, 5], lista1palabras)\n<mask token>\nprint(lista1palabras, lista2palabras[0, 3])\n", "step-3": "lista = []\nprint(lista)\nlistasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']\nprint(listasemana[0])\nlistasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']\nprint(listasemana[-1])\nlistasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']\nprint(listasemana[0, 3])\nlistaa = [1, 2, 3, 4, 'hola', 2, 2]\nconjunto = set(listaa)\nlistaa = listaa(conjunto)\nprint(conjunto)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista1palabras, lista2palabras)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista1palabras[1, 2, 4, 5], lista2palabras)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista2palabras[0, 1, 4, 5], lista1palabras)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista1palabras, lista2palabras[0, 3])\n", "step-4": "#listas\nlista=[]\nprint(lista)\n\n#lista semana\nlistasemana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\"]\nprint(listasemana[0])\n\n#lista semana\nlistasemana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\"]\nprint(listasemana[-1])\n\n\n#lista semana\nlistasemana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\"]\nprint(listasemana[0,3])\n\n#quitar los elementos repetidos de una lista\nlistaa=[1,2,3,4,\"hola\",2,2]\nconjunto=set(listaa)\nlistaa=listaa(conjunto)\nprint(conjunto)\n\n#listas palabras de 2 listas\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista1palabras,lista2palabras)\n\n#listas de palabras que aparecen en la primera lista\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista1palabras[1,2,4,5],lista2palabras)\n\n#listas de palabras que aparecen en la segunda lista\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista2palabras[0,1,4,5],lista1palabras)\n\n\n#listas palabras repetidas en ambas listas\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista1palabras,lista2palabras[0,3])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding: utf-8 ''' Created on 2013-7-8 @author: huqiming ''' import json import re import urllib2 ''' 图说内容 ''' class ts_content: ''' 图说标题 ''' title = '' ''' 图说日期 ''' date = '' ''' 图说段落 ''' parts = [] def __str__(self): return 'parts: ' + str(self.parts) ''' 图说段落 ''' class ts_content_part(json.JSONEncoder): ''' 段落标题 ''' title = '' ''' 段落的子内容 ''' items = [] def __str__(self): return 'title: ' + self.title + ' items: ' + str(self.items) class ts_content_part_item(json.JSONEncoder): txt_info = '' img_url = '' def __init__(self, txt, img): if txt : self.txt_info = txt if img : self.img_url = img def __str__(self): return 'info: ' + self.txt_info + ' img: ' + self.img_url def parse_content(url): # print(url) page = urllib2.urlopen(url) html = page.read() source = html.decode('GBK') parts = perform_parse_content(source) result = ts_content() result.parts = parts; return result def perform_parse_content(source): li = re.finditer(ur'<P>\u3010\d*\u3011.*?</P>', source) i = 0 index = [] res = [] for m in li: title = m.group() part = ts_content_part() part.title = remove_tags(title) res.append(part) pos = m.start() index.append(pos) if(i > 0): part_source = source[index[i - 1]:pos] res_item = parse_content_part(part_source) res[i - 1].items = res_item i += 1 part_source = source[pos:source.index('<P>&nbsp;</P>')] res_item = parse_content_part(part_source) res[i - 1].items = res_item return res def parse_content_part(source): li = re.finditer(r'<(P|DIV)>.*?</(P|DIV)>', source) res = [] for m in li: item = m.group() img = parse_img_src(item) txt = remove_tags(item) res_item = ts_content_part_item(txt, img) # print(res_item) res.append(res_item) return res def parse_img_src(source): m = re.search(r'<IMG.*?>', source) if m: img_tag = m.group() img_m = re.search(r'src=".*?"', img_tag) if img_m: src = img_m.group() src = src[5:-1] return src def remove_tags(source): p = re.compile(r"(<.*?>|</.*?>|<|/>|&nbsp;)") return p.sub('', source) # res = parse('http://www.dapenti.com/blog/more.asp?name=xilei&id=79405') # from ts_json import json_encode # ss = json_encode().encode(res) # print(ss)
normal
{ "blob_id": "094f482ec6d36dfaed7e908bc445e6e015ec409d", "index": 2718, "step-1": "# coding: utf-8\r\n'''\r\nCreated on 2013-7-8\r\n@author: huqiming\r\n'''\r\nimport json\r\nimport re\r\nimport urllib2\r\n'''\r\n图说内容\r\n'''\r\nclass ts_content:\r\n '''\r\n 图说标题\r\n '''\r\n title = ''\r\n '''\r\n 图说日期\r\n '''\r\n date = ''\r\n '''\r\n 图说段落\r\n '''\r\n parts = []\r\n def __str__(self):\r\n return 'parts: ' + str(self.parts)\r\n\r\n'''\r\n图说段落\r\n'''\r\nclass ts_content_part(json.JSONEncoder):\r\n '''\r\n 段落标题\r\n '''\r\n title = ''\r\n '''\r\n 段落的子内容\r\n '''\r\n items = []\r\n def __str__(self):\r\n return 'title: ' + self.title + ' items: ' + str(self.items)\r\n\r\nclass ts_content_part_item(json.JSONEncoder):\r\n txt_info = ''\r\n img_url = ''\r\n \r\n def __init__(self, txt, img):\r\n if txt :\r\n self.txt_info = txt\r\n if img : \r\n self.img_url = img\r\n \r\n def __str__(self):\r\n return 'info: ' + self.txt_info + ' img: ' + self.img_url\r\n \r\ndef parse_content(url):\r\n# print(url)\r\n page = urllib2.urlopen(url)\r\n html = page.read()\r\n source = html.decode('GBK')\r\n \r\n parts = perform_parse_content(source)\r\n result = ts_content()\r\n \r\n result.parts = parts;\r\n return result\r\n\r\ndef perform_parse_content(source):\r\n li = re.finditer(ur'<P>\\u3010\\d*\\u3011.*?</P>', source)\r\n i = 0\r\n\r\n index = []\r\n res = []\r\n for m in li:\r\n title = m.group()\r\n part = ts_content_part()\r\n part.title = remove_tags(title)\r\n res.append(part)\r\n \r\n pos = m.start()\r\n index.append(pos)\r\n \r\n if(i > 0):\r\n part_source = source[index[i - 1]:pos]\r\n res_item = parse_content_part(part_source)\r\n res[i - 1].items = res_item\r\n i += 1\r\n \r\n part_source = source[pos:source.index('<P>&nbsp;</P>')]\r\n res_item = parse_content_part(part_source)\r\n res[i - 1].items = res_item\r\n \r\n return res\r\n\r\ndef parse_content_part(source):\r\n li = re.finditer(r'<(P|DIV)>.*?</(P|DIV)>', source)\r\n res = []\r\n for m in li:\r\n item = m.group()\r\n img = parse_img_src(item)\r\n txt = remove_tags(item)\r\n res_item = ts_content_part_item(txt, img)\r\n# print(res_item)\r\n res.append(res_item)\r\n \r\n return res\r\n\r\ndef parse_img_src(source):\r\n m = re.search(r'<IMG.*?>', source)\r\n if m:\r\n img_tag = m.group()\r\n img_m = re.search(r'src=\".*?\"', img_tag)\r\n if img_m:\r\n src = img_m.group()\r\n src = src[5:-1]\r\n return src\r\n\r\ndef remove_tags(source):\r\n p = re.compile(r\"(<.*?>|</.*?>|<|/>|&nbsp;)\")\r\n return p.sub('', source)\r\n\r\n# res = parse('http://www.dapenti.com/blog/more.asp?name=xilei&id=79405')\r\n# from ts_json import json_encode\r\n# ss = json_encode().encode(res)\r\n# print(ss)\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from __future__ import absolute_import, division, print_function import time from flytekit.sdk.tasks import python_task, dynamic_task, inputs, outputs from flytekit.sdk.types import Types from flytekit.sdk.workflow import workflow_class, Input from six.moves import range @inputs(value1=Types.Integer) @outputs(out=Types.Integer) @python_task(cpu_request="1", cpu_limit="1", memory_request="5G") def dynamic_sub_task(workflow_parameters, value1, out): for i in range(11*60): print("This is load test task. I have been running for {} seconds.".format(i)) time.sleep(1) output = value1*2 print("Output: {}".format(output)) out.set(output) @inputs(tasks_count=Types.Integer) @outputs(out=[Types.Integer]) @dynamic_task(cache_version='1') def dynamic_task(workflow_parameters, tasks_count, out): res = [] for i in range(0, tasks_count): task = dynamic_sub_task(value1=i) yield task res.append(task.outputs.out) # Define how to set the final result of the task out.set(res) @workflow_class class FlyteDJOLoadTestWorkflow(object): tasks_count = Input(Types.Integer) dj = dynamic_task(tasks_count=tasks_count)
normal
{ "blob_id": "c30b0db220bdacd31ab23aa1227ce88affb79daa", "index": 2322, "step-1": "<mask token>\n\n\n@workflow_class\nclass FlyteDJOLoadTestWorkflow(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\n@workflow_class\nclass FlyteDJOLoadTestWorkflow(object):\n tasks_count = Input(Types.Integer)\n dj = dynamic_task(tasks_count=tasks_count)\n", "step-3": "<mask token>\n\n\n@inputs(value1=Types.Integer)\n@outputs(out=Types.Integer)\n@python_task(cpu_request='1', cpu_limit='1', memory_request='5G')\ndef dynamic_sub_task(workflow_parameters, value1, out):\n for i in range(11 * 60):\n print('This is load test task. I have been running for {} seconds.'\n .format(i))\n time.sleep(1)\n output = value1 * 2\n print('Output: {}'.format(output))\n out.set(output)\n\n\n@inputs(tasks_count=Types.Integer)\n@outputs(out=[Types.Integer])\n@dynamic_task(cache_version='1')\ndef dynamic_task(workflow_parameters, tasks_count, out):\n res = []\n for i in range(0, tasks_count):\n task = dynamic_sub_task(value1=i)\n yield task\n res.append(task.outputs.out)\n out.set(res)\n\n\n@workflow_class\nclass FlyteDJOLoadTestWorkflow(object):\n tasks_count = Input(Types.Integer)\n dj = dynamic_task(tasks_count=tasks_count)\n", "step-4": "from __future__ import absolute_import, division, print_function\nimport time\nfrom flytekit.sdk.tasks import python_task, dynamic_task, inputs, outputs\nfrom flytekit.sdk.types import Types\nfrom flytekit.sdk.workflow import workflow_class, Input\nfrom six.moves import range\n\n\n@inputs(value1=Types.Integer)\n@outputs(out=Types.Integer)\n@python_task(cpu_request='1', cpu_limit='1', memory_request='5G')\ndef dynamic_sub_task(workflow_parameters, value1, out):\n for i in range(11 * 60):\n print('This is load test task. I have been running for {} seconds.'\n .format(i))\n time.sleep(1)\n output = value1 * 2\n print('Output: {}'.format(output))\n out.set(output)\n\n\n@inputs(tasks_count=Types.Integer)\n@outputs(out=[Types.Integer])\n@dynamic_task(cache_version='1')\ndef dynamic_task(workflow_parameters, tasks_count, out):\n res = []\n for i in range(0, tasks_count):\n task = dynamic_sub_task(value1=i)\n yield task\n res.append(task.outputs.out)\n out.set(res)\n\n\n@workflow_class\nclass FlyteDJOLoadTestWorkflow(object):\n tasks_count = Input(Types.Integer)\n dj = dynamic_task(tasks_count=tasks_count)\n", "step-5": "from __future__ import absolute_import, division, print_function\n\nimport time\n\nfrom flytekit.sdk.tasks import python_task, dynamic_task, inputs, outputs\nfrom flytekit.sdk.types import Types\nfrom flytekit.sdk.workflow import workflow_class, Input\nfrom six.moves import range\n\n\n@inputs(value1=Types.Integer)\n@outputs(out=Types.Integer)\n@python_task(cpu_request=\"1\", cpu_limit=\"1\", memory_request=\"5G\")\ndef dynamic_sub_task(workflow_parameters, value1, out):\n for i in range(11*60):\n print(\"This is load test task. 
I have been running for {} seconds.\".format(i))\n time.sleep(1)\n\n output = value1*2\n print(\"Output: {}\".format(output))\n out.set(output)\n\n\n@inputs(tasks_count=Types.Integer)\n@outputs(out=[Types.Integer])\n@dynamic_task(cache_version='1')\ndef dynamic_task(workflow_parameters, tasks_count, out):\n res = []\n for i in range(0, tasks_count):\n task = dynamic_sub_task(value1=i)\n yield task\n res.append(task.outputs.out)\n\n # Define how to set the final result of the task\n out.set(res)\n\n\n@workflow_class\nclass FlyteDJOLoadTestWorkflow(object):\n tasks_count = Input(Types.Integer)\n dj = dynamic_task(tasks_count=tasks_count)\n", "step-ids": [ 1, 2, 4, 5, 6 ] }
[ 1, 2, 4, 5, 6 ]
import pymongo import redis import json from time import time user_timeline_mongodb = "mongodb://user-timeline-mongodb.sdc-socialnetwork-db.svc.cluster.local:27017/" user_timeline_redis = "user-timeline-redis.sdc-socialnetwork-db.svc.cluster.local" def handle(req): """handle a request to the function Args: req (str): request body """ start = time() event = json.loads(req) user_id = event["user_id"] post_id = event["post_id"] timestamp = event["timestamp"] myclient = pymongo.MongoClient(user_timeline_mongodb) mydb = myclient['user-timeline'] mycol = mydb["user-timeline"] myquery = { "user_id": user_id } mydoc = mycol.find(myquery) if mydoc.count() == 0: posts_j = {} posts_j[str(post_id)] = timestamp mydict = {"user_id": user_id, "posts": json.dumps(posts_j)} mycol.insert_one(mydict) else: posts_j = json.loads(mydoc.next()["posts"]) posts_j[str(post_id)] = timestamp posts_update = {"$set": {"posts": json.dumps(posts_j)}} mycol.update_one(myquery, posts_update) r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True) r.hset(user_id, post_id, timestamp) #r.hset("end_time", event["req_id"], str(time())) return str(time() - start)
normal
{ "blob_id": "37969899aa646f4cdd7a5513f17d26b334870f1b", "index": 6598, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef handle(req):\n \"\"\"handle a request to the function\n Args:\n req (str): request body\n \"\"\"\n start = time()\n event = json.loads(req)\n user_id = event['user_id']\n post_id = event['post_id']\n timestamp = event['timestamp']\n myclient = pymongo.MongoClient(user_timeline_mongodb)\n mydb = myclient['user-timeline']\n mycol = mydb['user-timeline']\n myquery = {'user_id': user_id}\n mydoc = mycol.find(myquery)\n if mydoc.count() == 0:\n posts_j = {}\n posts_j[str(post_id)] = timestamp\n mydict = {'user_id': user_id, 'posts': json.dumps(posts_j)}\n mycol.insert_one(mydict)\n else:\n posts_j = json.loads(mydoc.next()['posts'])\n posts_j[str(post_id)] = timestamp\n posts_update = {'$set': {'posts': json.dumps(posts_j)}}\n mycol.update_one(myquery, posts_update)\n r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)\n r.hset(user_id, post_id, timestamp)\n return str(time() - start)\n", "step-3": "<mask token>\nuser_timeline_mongodb = (\n 'mongodb://user-timeline-mongodb.sdc-socialnetwork-db.svc.cluster.local:27017/'\n )\nuser_timeline_redis = (\n 'user-timeline-redis.sdc-socialnetwork-db.svc.cluster.local')\n\n\ndef handle(req):\n \"\"\"handle a request to the function\n Args:\n req (str): request body\n \"\"\"\n start = time()\n event = json.loads(req)\n user_id = event['user_id']\n post_id = event['post_id']\n timestamp = event['timestamp']\n myclient = pymongo.MongoClient(user_timeline_mongodb)\n mydb = myclient['user-timeline']\n mycol = mydb['user-timeline']\n myquery = {'user_id': user_id}\n mydoc = mycol.find(myquery)\n if mydoc.count() == 0:\n posts_j = {}\n posts_j[str(post_id)] = timestamp\n mydict = {'user_id': user_id, 'posts': json.dumps(posts_j)}\n mycol.insert_one(mydict)\n else:\n posts_j = json.loads(mydoc.next()['posts'])\n posts_j[str(post_id)] = timestamp\n posts_update = {'$set': {'posts': json.dumps(posts_j)}}\n mycol.update_one(myquery, posts_update)\n r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)\n r.hset(user_id, post_id, timestamp)\n return str(time() - start)\n", "step-4": "import pymongo\nimport redis\nimport json\nfrom time import time\nuser_timeline_mongodb = (\n 'mongodb://user-timeline-mongodb.sdc-socialnetwork-db.svc.cluster.local:27017/'\n )\nuser_timeline_redis = (\n 'user-timeline-redis.sdc-socialnetwork-db.svc.cluster.local')\n\n\ndef handle(req):\n \"\"\"handle a request to the function\n Args:\n req (str): request body\n \"\"\"\n start = time()\n event = json.loads(req)\n user_id = event['user_id']\n post_id = event['post_id']\n timestamp = event['timestamp']\n myclient = pymongo.MongoClient(user_timeline_mongodb)\n mydb = myclient['user-timeline']\n mycol = mydb['user-timeline']\n myquery = {'user_id': user_id}\n mydoc = mycol.find(myquery)\n if mydoc.count() == 0:\n posts_j = {}\n posts_j[str(post_id)] = timestamp\n mydict = {'user_id': user_id, 'posts': json.dumps(posts_j)}\n mycol.insert_one(mydict)\n else:\n posts_j = json.loads(mydoc.next()['posts'])\n posts_j[str(post_id)] = timestamp\n posts_update = {'$set': {'posts': json.dumps(posts_j)}}\n mycol.update_one(myquery, posts_update)\n r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)\n r.hset(user_id, post_id, timestamp)\n return str(time() - start)\n", "step-5": "import pymongo\nimport redis\nimport json\nfrom time import time\n\nuser_timeline_mongodb = 
\"mongodb://user-timeline-mongodb.sdc-socialnetwork-db.svc.cluster.local:27017/\"\nuser_timeline_redis = \"user-timeline-redis.sdc-socialnetwork-db.svc.cluster.local\"\n\n\ndef handle(req):\n \"\"\"handle a request to the function\n Args:\n req (str): request body\n \"\"\"\n start = time()\n event = json.loads(req)\n\n user_id = event[\"user_id\"]\n post_id = event[\"post_id\"]\n timestamp = event[\"timestamp\"]\n\n myclient = pymongo.MongoClient(user_timeline_mongodb)\n mydb = myclient['user-timeline']\n mycol = mydb[\"user-timeline\"]\n\n myquery = { \"user_id\": user_id }\n mydoc = mycol.find(myquery)\n\n if mydoc.count() == 0:\n posts_j = {}\n posts_j[str(post_id)] = timestamp\n mydict = {\"user_id\": user_id, \"posts\": json.dumps(posts_j)}\n mycol.insert_one(mydict)\n else:\n posts_j = json.loads(mydoc.next()[\"posts\"])\n posts_j[str(post_id)] = timestamp\n posts_update = {\"$set\": {\"posts\": json.dumps(posts_j)}}\n mycol.update_one(myquery, posts_update)\n\n r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)\n r.hset(user_id, post_id, timestamp)\n\n #r.hset(\"end_time\", event[\"req_id\"], str(time()))\n\n return str(time() - start)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def test_fibonacci_zero(): actual = func.fibonacci(0) expected = 0 assert actual == expected def test_fibonacci_one(): actual = func.fibonacci(1) expected = 1 assert actual == expected def test_fibonacci_negative(): actual = func.fibonacci(-5) expected = 'Negative values are not allowable' assert actual == expected <|reserved_special_token_0|> def test_lucas_negative(): actual = func.lucas(-5) expected = 'Negative values are not allowable' assert actual == expected <|reserved_special_token_0|> def test_non_fibonacci_lucas_zero(): actual = func.non_fibonacci_lucas(0, 2, 4) expected = 2 assert actual == expected def test_non_fibonacci_lucas_one(): actual = func.non_fibonacci_lucas(1, 2, 4) expected = 4 assert actual == expected def test_non_fibonacci_lucas_negative(): actual = func.non_fibonacci_lucas(-5, 2, 4) expected = 'Negative values are not allowable' assert actual == expected def test_non_fibonacci_lucas_else(): actual = func.non_fibonacci_lucas(3, 2, 4) expected = 10 assert actual == expected <|reserved_special_token_1|> <|reserved_special_token_0|> def test_fibonacci_zero(): actual = func.fibonacci(0) expected = 0 assert actual == expected def test_fibonacci_one(): actual = func.fibonacci(1) expected = 1 assert actual == expected def test_fibonacci_negative(): actual = func.fibonacci(-5) expected = 'Negative values are not allowable' assert actual == expected <|reserved_special_token_0|> def test_lucas_zero(): actual = func.lucas(0) expected = 2 assert actual == expected <|reserved_special_token_0|> def test_lucas_negative(): actual = func.lucas(-5) expected = 'Negative values are not allowable' assert actual == expected <|reserved_special_token_0|> def test_non_fibonacci_lucas_zero(): actual = func.non_fibonacci_lucas(0, 2, 4) expected = 2 assert actual == expected def test_non_fibonacci_lucas_one(): actual = func.non_fibonacci_lucas(1, 2, 4) expected = 4 assert actual == expected def test_non_fibonacci_lucas_negative(): actual = func.non_fibonacci_lucas(-5, 2, 4) expected = 'Negative values are not allowable' assert actual == expected def test_non_fibonacci_lucas_else(): actual = func.non_fibonacci_lucas(3, 2, 4) expected = 10 assert actual == expected <|reserved_special_token_1|> <|reserved_special_token_0|> def test_fibonacci_zero(): actual = func.fibonacci(0) expected = 0 assert actual == expected def test_fibonacci_one(): actual = func.fibonacci(1) expected = 1 assert actual == expected def test_fibonacci_negative(): actual = func.fibonacci(-5) expected = 'Negative values are not allowable' assert actual == expected def test_fibonacci_else(): actual = func.fibonacci(6) expected = 8 assert actual == expected <|reserved_special_token_0|> def test_lucas_zero(): actual = func.lucas(0) expected = 2 assert actual == expected def test_lucas_one(): actual = func.lucas(1) expected = 1 assert actual == expected def test_lucas_negative(): actual = func.lucas(-5) expected = 'Negative values are not allowable' assert actual == expected <|reserved_special_token_0|> def test_non_fibonacci_lucas_zero(): actual = func.non_fibonacci_lucas(0, 2, 4) expected = 2 assert actual == expected def test_non_fibonacci_lucas_one(): actual = func.non_fibonacci_lucas(1, 2, 4) expected = 4 assert actual == expected def test_non_fibonacci_lucas_negative(): actual = func.non_fibonacci_lucas(-5, 2, 4) expected = 'Negative values are not allowable' assert actual == expected def test_non_fibonacci_lucas_else(): actual = func.non_fibonacci_lucas(3, 2, 4) expected = 10 assert actual == 
expected <|reserved_special_token_1|> import math_series.series as func <|reserved_special_token_0|> def test_fibonacci_zero(): actual = func.fibonacci(0) expected = 0 assert actual == expected def test_fibonacci_one(): actual = func.fibonacci(1) expected = 1 assert actual == expected def test_fibonacci_negative(): actual = func.fibonacci(-5) expected = 'Negative values are not allowable' assert actual == expected def test_fibonacci_else(): actual = func.fibonacci(6) expected = 8 assert actual == expected <|reserved_special_token_0|> def test_lucas_zero(): actual = func.lucas(0) expected = 2 assert actual == expected def test_lucas_one(): actual = func.lucas(1) expected = 1 assert actual == expected def test_lucas_negative(): actual = func.lucas(-5) expected = 'Negative values are not allowable' assert actual == expected def test_lucas_else(): actual = func.lucas(6) expected = 18 assert actual == expected <|reserved_special_token_0|> def test_non_fibonacci_lucas_zero(): actual = func.non_fibonacci_lucas(0, 2, 4) expected = 2 assert actual == expected def test_non_fibonacci_lucas_one(): actual = func.non_fibonacci_lucas(1, 2, 4) expected = 4 assert actual == expected def test_non_fibonacci_lucas_negative(): actual = func.non_fibonacci_lucas(-5, 2, 4) expected = 'Negative values are not allowable' assert actual == expected def test_non_fibonacci_lucas_else(): actual = func.non_fibonacci_lucas(3, 2, 4) expected = 10 assert actual == expected <|reserved_special_token_1|> import math_series.series as func """ Testing for fibonacci function """ def test_fibonacci_zero(): actual = func.fibonacci(0) expected = 0 assert actual == expected def test_fibonacci_one(): actual = func.fibonacci(1) expected = 1 assert actual == expected def test_fibonacci_negative(): actual = func.fibonacci(-5) expected = "Negative values are not allowable" assert actual == expected def test_fibonacci_else(): actual = func.fibonacci(6) expected = 8 assert actual == expected """ Testing for lucas function """ def test_lucas_zero(): actual = func.lucas(0) expected = 2 assert actual == expected def test_lucas_one(): actual = func.lucas(1) expected = 1 assert actual == expected def test_lucas_negative(): actual = func.lucas(-5) expected = "Negative values are not allowable" assert actual == expected def test_lucas_else(): actual = func.lucas(6) expected = 18 assert actual == expected """ Testing for non_fibonacci_lucas function """ def test_non_fibonacci_lucas_zero(): actual = func.non_fibonacci_lucas(0,2,4) expected = 2 assert actual == expected def test_non_fibonacci_lucas_one(): actual = func.non_fibonacci_lucas(1,2,4) expected = 4 assert actual == expected def test_non_fibonacci_lucas_negative(): actual = func.non_fibonacci_lucas(-5,2,4) expected = "Negative values are not allowable" assert actual == expected def test_non_fibonacci_lucas_else(): actual = func.non_fibonacci_lucas(3,2,4) expected = 10 assert actual == expected
flexible
{ "blob_id": "49722f640eec02029865fd702e13e485eda6391b", "index": 8126, "step-1": "<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n", "step-2": "<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n", "step-3": "<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_fibonacci_else():\n actual = func.fibonacci(6)\n expected = 8\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\ndef test_lucas_one():\n actual = func.lucas(1)\n expected = 1\n assert actual == expected\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n 
actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n", "step-4": "import math_series.series as func\n<mask token>\n\n\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_fibonacci_else():\n actual = func.fibonacci(6)\n expected = 8\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\ndef test_lucas_one():\n actual = func.lucas(1)\n expected = 1\n assert actual == expected\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_lucas_else():\n actual = func.lucas(6)\n expected = 18\n assert actual == expected\n\n\n<mask token>\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0, 2, 4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1, 2, 4)\n expected = 4\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5, 2, 4)\n expected = 'Negative values are not allowable'\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3, 2, 4)\n expected = 10\n assert actual == expected\n", "step-5": "import math_series.series as func\n\n\"\"\" Testing for fibonacci function \"\"\"\ndef test_fibonacci_zero():\n actual = func.fibonacci(0)\n expected = 0\n assert actual == expected\n\ndef test_fibonacci_one():\n actual = func.fibonacci(1)\n expected = 1\n assert actual == expected\n\n\ndef test_fibonacci_negative():\n actual = func.fibonacci(-5)\n expected = \"Negative values are not allowable\"\n assert actual == expected\n\n\ndef test_fibonacci_else():\n actual = func.fibonacci(6)\n expected = 8\n assert actual == expected\n\n\"\"\" Testing for lucas function \"\"\"\n\ndef test_lucas_zero():\n actual = func.lucas(0)\n expected = 2\n assert actual == expected\n\n\ndef test_lucas_one():\n actual = func.lucas(1)\n expected = 1\n assert actual == expected\n\n\n\ndef test_lucas_negative():\n actual = func.lucas(-5)\n expected = \"Negative values are not allowable\"\n assert actual == expected\n\n\ndef test_lucas_else():\n actual = func.lucas(6)\n expected = 18\n assert actual == expected\n\n\n\"\"\" Testing for non_fibonacci_lucas function \"\"\"\n\n\ndef test_non_fibonacci_lucas_zero():\n actual = func.non_fibonacci_lucas(0,2,4)\n expected = 2\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_one():\n actual = func.non_fibonacci_lucas(1,2,4)\n expected = 4\n assert actual == expected\n\n\n\ndef test_non_fibonacci_lucas_negative():\n actual = func.non_fibonacci_lucas(-5,2,4)\n expected = \"Negative values are not allowable\"\n assert actual == expected\n\n\ndef test_non_fibonacci_lucas_else():\n actual = func.non_fibonacci_lucas(3,2,4)\n expected = 10\n assert actual == expected", "step-ids": [ 8, 9, 11, 13, 14 ] }
[ 8, 9, 11, 13, 14 ]
<|reserved_special_token_0|> def get_tweets(filename): """ Process a json formatted file with tweets using pandas read_json """ try: tweets = [] pd_tweets = pd.read_json(filename, lines=True) pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text'] tweets = pd_tweets.to_list() return tweets except: print('Something went wrong parsing the file ' + filename) def get_sentiments(filename): """ Process a file that contains in each line a word or set of words followed by a numerical value, called "feeling - returns a dictionary with pairs of words and sentiments """ valores = {} for linea in open(filename, 'r'): termino, valor = linea.split('\t') valores[termino] = int(valor) return valores <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_tweets(filename): """ Process a json formatted file with tweets using pandas read_json """ try: tweets = [] pd_tweets = pd.read_json(filename, lines=True) pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text'] tweets = pd_tweets.to_list() return tweets except: print('Something went wrong parsing the file ' + filename) def get_sentiments(filename): """ Process a file that contains in each line a word or set of words followed by a numerical value, called "feeling - returns a dictionary with pairs of words and sentiments """ valores = {} for linea in open(filename, 'r'): termino, valor = linea.split('\t') valores[termino] = int(valor) return valores <|reserved_special_token_0|> for tweet in list_of_tweets: tweet_sentimiento = 0 words_without_sent = [] number_of_words = 0 for word in tweet.split(' '): tweet_sentimiento += valores.get(word.lower(), 0) number_of_words += 1 if valores.get(word.lower(), 0) == 0: words_without_sent.append(word) for item in words_without_sent: print(item + ': ' + str(tweet_sentimiento / number_of_words)) print('\n') print('--- THE END ---') <|reserved_special_token_1|> <|reserved_special_token_0|> def get_tweets(filename): """ Process a json formatted file with tweets using pandas read_json """ try: tweets = [] pd_tweets = pd.read_json(filename, lines=True) pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text'] tweets = pd_tweets.to_list() return tweets except: print('Something went wrong parsing the file ' + filename) def get_sentiments(filename): """ Process a file that contains in each line a word or set of words followed by a numerical value, called "feeling - returns a dictionary with pairs of words and sentiments """ valores = {} for linea in open(filename, 'r'): termino, valor = linea.split('\t') valores[termino] = int(valor) return valores file_tweet = 'Tweets.txt' file_sentimientos = 'Sentimientos.txt' list_of_tweets = get_tweets(file_tweet) valores = get_sentiments(file_sentimientos) for tweet in list_of_tweets: tweet_sentimiento = 0 words_without_sent = [] number_of_words = 0 for word in tweet.split(' '): tweet_sentimiento += valores.get(word.lower(), 0) number_of_words += 1 if valores.get(word.lower(), 0) == 0: words_without_sent.append(word) for item in words_without_sent: print(item + ': ' + str(tweet_sentimiento / number_of_words)) print('\n') print('--- THE END ---') <|reserved_special_token_1|> import json import pandas as pd def get_tweets(filename): """ Process a json formatted file with tweets using pandas read_json """ try: tweets = [] pd_tweets = pd.read_json(filename, lines=True) pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text'] tweets = pd_tweets.to_list() return tweets except: print('Something went wrong parsing the file ' + filename) def 
get_sentiments(filename): """ Process a file that contains in each line a word or set of words followed by a numerical value, called "feeling - returns a dictionary with pairs of words and sentiments """ valores = {} for linea in open(filename, 'r'): termino, valor = linea.split('\t') valores[termino] = int(valor) return valores file_tweet = 'Tweets.txt' file_sentimientos = 'Sentimientos.txt' list_of_tweets = get_tweets(file_tweet) valores = get_sentiments(file_sentimientos) for tweet in list_of_tweets: tweet_sentimiento = 0 words_without_sent = [] number_of_words = 0 for word in tweet.split(' '): tweet_sentimiento += valores.get(word.lower(), 0) number_of_words += 1 if valores.get(word.lower(), 0) == 0: words_without_sent.append(word) for item in words_without_sent: print(item + ': ' + str(tweet_sentimiento / number_of_words)) print('\n') print('--- THE END ---') <|reserved_special_token_1|> # MÁSTER EN BIG DATA Y BUSINESS ANALYTICS # MOD 1 - FINAL EVALUATION - EX. 2: dado un archivo que contiene en cada línea # una palabra o conjunto de palabras seguido de un valor numérico denominado # “sentimiento” y un conjunto de tweets, se pide calcular el sentimiento de # aquellas palabras o conjunto de palabras que no tienen un valor asociado en el # archivo de “sentimientos”. Se pueden seguir distintas estrategias para asignar # un valor. Por ejemplo, se podría asignar como valor el valor del “sentimiento” # del tweet en que se encuentra la palabra o conjunto de palabras sin valor, o # el valor medio del “sentimiento” del tweet. import json import pandas as pd # ---- FUNCTIONS --------------------------------------------------------------- def get_tweets(filename): """ Process a json formatted file with tweets using pandas read_json """ try: tweets = [] pd_tweets = pd.read_json(filename, lines=True) # use parameter lines=True to read the file as a json object per line pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text'] tweets = pd_tweets.to_list() return tweets except: print("Something went wrong parsing the file " + filename) def get_sentiments(filename): """ Process a file that contains in each line a word or set of words followed by a numerical value, called "feeling - returns a dictionary with pairs of words and sentiments """ valores = {} for linea in open(filename, 'r'): termino, valor = linea.split('\t') valores[termino] = int(valor) return valores # ---- MAIN PROGRAM ------------------------------------------------------------------------------------------------- # ---- Filenames (including path) file_tweet = 'Tweets.txt' file_sentimientos = 'Sentimientos.txt' # -- PROCESS TWEETS FILE WITH PANDAS READ_JSON list_of_tweets = get_tweets(file_tweet) # -- PROCESS SENTIMIENTOS FILE TO A DICITIONARY valores = get_sentiments(file_sentimientos) # -- PROCESS TWEETS SENTIMENT AND PRINT for tweet in list_of_tweets: tweet_sentimiento = 0 words_without_sent = [] number_of_words = 0 for word in tweet.split(" "): tweet_sentimiento += valores.get(word.lower(),0) number_of_words += 1 if valores.get(word.lower(),0)==0: words_without_sent.append(word) # asignar como valor el valor medio del “sentimiento” del tweet for item in words_without_sent: print(item + ': ' + str(tweet_sentimiento/number_of_words)) print("\n") print("--- THE END ---")
flexible
{ "blob_id": "acd2d84529e197d6f9d134e8d7e25a51a442f3ae", "index": 8615, "step-1": "<mask token>\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\n<mask token>\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(' '):\n tweet_sentimiento += valores.get(word.lower(), 0)\n number_of_words += 1\n if valores.get(word.lower(), 0) == 0:\n words_without_sent.append(word)\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento / number_of_words))\n print('\\n')\nprint('--- THE END ---')\n", "step-3": "<mask token>\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\nfile_tweet = 'Tweets.txt'\nfile_sentimientos = 'Sentimientos.txt'\nlist_of_tweets = get_tweets(file_tweet)\nvalores = get_sentiments(file_sentimientos)\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(' '):\n tweet_sentimiento += valores.get(word.lower(), 0)\n number_of_words += 1\n if valores.get(word.lower(), 0) == 0:\n words_without_sent.append(word)\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento / number_of_words))\n print('\\n')\nprint('--- THE END ---')\n", "step-4": "import json\nimport pandas as pd\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets 
= pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\nfile_tweet = 'Tweets.txt'\nfile_sentimientos = 'Sentimientos.txt'\nlist_of_tweets = get_tweets(file_tweet)\nvalores = get_sentiments(file_sentimientos)\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(' '):\n tweet_sentimiento += valores.get(word.lower(), 0)\n number_of_words += 1\n if valores.get(word.lower(), 0) == 0:\n words_without_sent.append(word)\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento / number_of_words))\n print('\\n')\nprint('--- THE END ---')\n", "step-5": "# MÁSTER EN BIG DATA Y BUSINESS ANALYTICS\n# MOD 1 - FINAL EVALUATION - EX. 2: dado un archivo que contiene en cada línea\n# una palabra o conjunto de palabras seguido de un valor numérico denominado\n# “sentimiento” y un conjunto de tweets, se pide calcular el sentimiento de\n# aquellas palabras o conjunto de palabras que no tienen un valor asociado en el\n# archivo de “sentimientos”. Se pueden seguir distintas estrategias para asignar\n# un valor. Por ejemplo, se podría asignar como valor el valor del “sentimiento”\n# del tweet en que se encuentra la palabra o conjunto de palabras sin valor, o\n# el valor medio del “sentimiento” del tweet.\n\nimport json\nimport pandas as pd\n\n\n# ---- FUNCTIONS ---------------------------------------------------------------\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True) # use parameter lines=True to read the file as a json object per line\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print(\"Something went wrong parsing the file \" + filename)\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n# ---- MAIN PROGRAM -------------------------------------------------------------------------------------------------\n\n# ---- Filenames (including path)\nfile_tweet = 'Tweets.txt'\nfile_sentimientos = 'Sentimientos.txt'\n\n\n# -- PROCESS TWEETS FILE WITH PANDAS READ_JSON\nlist_of_tweets = get_tweets(file_tweet)\n\n# -- PROCESS SENTIMIENTOS FILE TO A DICITIONARY\nvalores = get_sentiments(file_sentimientos)\n\n# -- PROCESS TWEETS SENTIMENT AND PRINT\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(\" \"):\n tweet_sentimiento += valores.get(word.lower(),0)\n number_of_words += 1\n if valores.get(word.lower(),0)==0:\n words_without_sent.append(word)\n\n # asignar como valor el valor medio del “sentimiento” del tweet\n for item in 
words_without_sent:\n print(item + ': ' + str(tweet_sentimiento/number_of_words))\n print(\"\\n\")\n\nprint(\"--- THE END ---\")\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
"""
Given a list of partitioned and sentiment-analyzed tweets, run several trials
to guess who won the election
"""

import json
import math
import sys
import pprint

import feature_vector


def positive_volume(f):
    return f['relative_volume'] * f['positive_percent']


def inv_negative_volume(f):
    return 1.0 - f['relative_volume'] * f['negative_percent']


def normalized_sentiment(f):
    return (f['average_sentiment'] + 1) / 2


def normalized_square_sentiment(f):
    return (f['avg_square_sentiment'] + 1) / 2


def weighted_sentiment(f):
    return (f['relative_volume'] * f['average_sentiment'] + 1) / 2


# We want a function that's close to 1 unless the relative tweet volume is low
def quadratic_diff_penalty(f, scale):
    val = f['relative_volume']
    return 1 - scale * (1 - val) ** 2


# Experiment using x ** 3 as the penalty function
def cubic_diff_penalty(f, scale):
    val = f['relative_volume']
    return 1 - scale * (1 - val) ** 3


def linear_combination(f, a1, a2, a3, a4=0, a5=0):
    return (a1 * positive_volume(f)
            + a2 * inv_negative_volume(f)
            + a3 * normalized_sentiment(f)
            + a4 * normalized_square_sentiment(f)
            + a5 * weighted_sentiment(f))


def run_trial(function, feature_map):
    candidate_scores = {}
    total_score = 0
    for candidate, features in feature_map.items():
        score = function(features)
        candidate_scores[candidate] = score
        total_score += score
    for candidate, score in candidate_scores.items():
        candidate_scores[candidate] = score / total_score
    return candidate_scores


def predict(tweet_dictionary, print_all):
    features = feature_vector.gen_feature_vector(tweet_dictionary)
    trial_list = [
        # 1
        lambda f: linear_combination(f, 1, 0, 0),
        lambda f: linear_combination(f, 0.5, 0, 0.5),
        lambda f: linear_combination(f, 0.33, 0.33, 0.33),
        lambda f: linear_combination(f, 0.25, 0.25, 0.5),
        lambda f: linear_combination(f, 0.5, 0.25, 0.25),
        lambda f: linear_combination(f, 0.2, 0.1, 0.0, 0.7),
        lambda f: linear_combination(f, 0.0, 0.0, 0.0, 1.0),
        lambda f: linear_combination(f, 0.5, 0.0, 0.0, 0.5),
        lambda f: linear_combination(f, 0.3, 0.15, 0.15, 0.3),
        lambda f: linear_combination(f, 0.5, 0.1, 0.1, 0.3),
        # 11
        lambda f: linear_combination(f, 0.6, 0.0, 0.0, 0.4),
        lambda f: linear_combination(f, 0.55, 0.0, 0.2, 0.25),
        lambda f: linear_combination(f, 0.5, 0.1, 0.15, 0.25),
        lambda f: linear_combination(f, 0.5, 0.05, 0.1, 0.35),
        lambda f: linear_combination(f, 0.4, 0.05, 0.1, 0.35, 0.1),
        lambda f: linear_combination(f, 0.45, 0.05, 0.05, 0.35, 0.1),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.35, 0.2),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 1),
        lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 0.25),
        # 21
        lambda f: linear_combination(f, 0.25, 0.0, 0.15, 0.4, 0.2) * quadratic_diff_penalty(f, 0.25),
        lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.3),
        lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.4),
        lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.4),
        lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.45),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.45),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.5),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.5),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.6),
        lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.7),
        # 31
        lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.7),
        lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.75),
        lambda f: linear_combination(f, 0.05, 0.0, 0.25, 0.7, 0) * cubic_diff_penalty(f, 0.75),
    ]

    if print_all:
        print('Feature vector:')
        pprint.pprint(features)
        print('\nTrial Results:')
        for index, function in enumerate(trial_list):
            print('trial %d:' % (index + 1))
            print(run_trial(function, features))
            print()
    print()
    final_trial_result = run_trial(trial_list[-1], features)
    print('Predicted Outcome:')
    max_percent = 0
    winning_candidate = ''
    for candidate, percent in final_trial_result.items():
        print(candidate + ': ', int(percent * 100000) / 1000)
        if percent > max_percent:
            max_percent = percent
            winning_candidate = candidate
    print('\nProjected Winner:')
    print(winning_candidate)


if __name__ == '__main__':
    if len(sys.argv) != 2 and len(sys.argv) != 3:
        print('Usage: python predict.py filename [print_all = True]')
        exit()
    with open(sys.argv[1], 'r') as tweet_file:
        print_all = True if len(sys.argv) == 2 else (sys.argv[2].lower() == 'true')
        predict(json.loads(tweet_file.read()), print_all)
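For reference, a minimal sketch of how run_trial combines one of the scoring lambdas with a per-candidate feature map. The feature keys mirror the ones the scoring functions read, but the numbers below are made up; real values would come from feature_vector.gen_feature_vector().

def _example_run_trial():
    # Hand-built feature map; in practice this comes from gen_feature_vector().
    example_features = {
        'candidate_a': {'relative_volume': 0.6, 'positive_percent': 0.5,
                        'negative_percent': 0.2, 'average_sentiment': 0.3,
                        'avg_square_sentiment': 0.2},
        'candidate_b': {'relative_volume': 0.4, 'positive_percent': 0.4,
                        'negative_percent': 0.3, 'average_sentiment': 0.1,
                        'avg_square_sentiment': 0.05},
    }
    # Same shape as the final trial: a weighted combination scaled by a volume penalty.
    scoring = lambda f: (linear_combination(f, 0.05, 0.0, 0.25, 0.7, 0)
                         * cubic_diff_penalty(f, 0.75))
    shares = run_trial(scoring, example_features)
    # shares maps each candidate to a normalized score; the values sum to 1.
    return shares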
from django.test import TestCase, Client

from accounts.models import Account
from .data import account
from rest_framework import status


class TestAccountRequests(TestCase):
    def setUp(self):
        self.client = Client()
        self.superuser = Account.objects.create_superuser(**account)

    def test_register_admin(self):
        response = self.client.post(f'/account/register/', data=account,
                                    content_type='application/json')

        self.assertTrue(status.HTTP_200_OK, response.status_code)

    def test_login(self):
        data = {
            'email': '[email protected]',
            'password': 'Pwd1q2w3e',
        }
        Account.objects.create(**data)
        response = self.client.post(f'/account/login/', data=data,
                                    content_type='application/json')

        self.assertTrue(status.HTTP_200_OK, response.status_code)
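The setUp above builds its superuser from an account fixture imported from a sibling data module that is not shown here. A plausible minimal sketch of that module, assuming the custom Account model is keyed on email and password; both values below are placeholders, not the originals.

# data.py (hypothetical fixture module assumed by the tests above)
account = {
    'email': 'admin@example.com',
    'password': 'Sup3rS3cret!',
}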
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

"""BatchNorm (BN) utility functions and custom batch-size BN implementations"""

from functools import partial
import torch
import torch.nn as nn

from pytorchvideo.layers.batch_norm import (
    NaiveSyncBatchNorm1d,
    NaiveSyncBatchNorm3d,
)  # noqa


def get_norm(cfg):
    """
    Args:
        cfg (CfgNode): model building configs, details are in the comments of
            the config file.
    Returns:
        nn.Module: the normalization layer.
    """
    if cfg.BN.NORM_TYPE in {"batchnorm", "sync_batchnorm_apex"}:
        return nn.BatchNorm3d
    elif cfg.BN.NORM_TYPE == "sub_batchnorm":
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    elif cfg.BN.NORM_TYPE == "sync_batchnorm":
        return partial(
            NaiveSyncBatchNorm3d,
            num_sync_devices=cfg.BN.NUM_SYNC_DEVICES,
            global_sync=cfg.BN.GLOBAL_SYNC,
        )
    else:
        raise NotImplementedError(
            "Norm type {} is not supported".format(cfg.BN.NORM_TYPE)
        )


class SubBatchNorm3d(nn.Module):
    """
    The standard BN layer computes stats across all examples in a GPU. In some
    cases it is desirable to compute stats across only a subset of examples
    (e.g., in multigrid training https://arxiv.org/abs/1912.00998).
    SubBatchNorm3d splits the batch dimension into N splits, and run BN on
    each of them separately (so that the stats are computed on each subset of
    examples (1/N of batch) independently. During evaluation, it aggregates
    the stats from all splits into one BN.
    """

    def __init__(self, num_splits, **args):
        """
        Args:
            num_splits (int): number of splits.
            args (list): other arguments.
        """
        super(SubBatchNorm3d, self).__init__()
        self.num_splits = num_splits
        num_features = args["num_features"]
        # Keep only one set of weight and bias.
        if args.get("affine", True):
            self.affine = True
            args["affine"] = False
            self.weight = torch.nn.Parameter(torch.ones(num_features))
            self.bias = torch.nn.Parameter(torch.zeros(num_features))
        else:
            self.affine = False
        self.bn = nn.BatchNorm3d(**args)
        args["num_features"] = num_features * num_splits
        self.split_bn = nn.BatchNorm3d(**args)

    def _get_aggregated_mean_std(self, means, stds, n):
        """
        Calculate the aggregated mean and stds.
        Args:
            means (tensor): mean values.
            stds (tensor): standard deviations.
            n (int): number of sets of means and stds.
        """
        mean = means.view(n, -1).sum(0) / n
        std = (
            stds.view(n, -1).sum(0) / n
            + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
        )
        return mean.detach(), std.detach()

    def aggregate_stats(self):
        """
        Synchronize running_mean, and running_var. Call this before eval.
        """
        if self.split_bn.track_running_stats:
            (
                self.bn.running_mean.data,
                self.bn.running_var.data,
            ) = self._get_aggregated_mean_std(
                self.split_bn.running_mean,
                self.split_bn.running_var,
                self.num_splits,
            )

    def forward(self, x):
        if self.training:
            n, c, t, h, w = x.shape
            x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
            x = self.split_bn(x)
            x = x.view(n, c, t, h, w)
        else:
            x = self.bn(x)
        if self.affine:
            x = x * self.weight.view((-1, 1, 1, 1))
            x = x + self.bias.view((-1, 1, 1, 1))
        return x
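A minimal usage sketch of SubBatchNorm3d, not part of the original module: it assumes a 5-D video batch whose batch size is divisible by num_splits, and shows that aggregate_stats() is meant to be called before switching to eval mode so the per-split statistics are folded into the single BN used at inference.

# Illustrative only; shapes and values are arbitrary.
if __name__ == "__main__":
    bn = SubBatchNorm3d(num_splits=2, num_features=8)
    x = torch.randn(4, 8, 4, 16, 16)  # (N, C, T, H, W); N divisible by num_splits
    bn.train()
    y_train = bn(x)       # training path: stats computed independently per split
    bn.aggregate_stats()  # fold split running stats into the eval-time BN
    bn.eval()
    y_eval = bn(x)        # eval path: uses the aggregated running statistics
    print(y_train.shape, y_eval.shape)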
# Generated by Django 3.0.3 on 2020-02-09 06:29

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('devices_collect', '0004_auto_20200209_1304'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collectdevices',
            name='generated_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 2, 9, 6, 28, 34, 547300, tzinfo=utc)),
        ),
    ]
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .tasks import read_all_models, update_a_model, delete_a_model, read_a_model, create_a_model
from .forms import MyModelForm


def home(request):
    content = read_all_models()
    sorted_list = sorted(content, key=lambda k: k['title'].title(), reverse=False)
    return render(request, 'myapp/index.html', {'sorted_list': sorted_list})


def create(request):
    if request.method == 'POST':
        title = request.POST.get('title')
        desc = request.POST.get('desc')
        d = {}
        d['title'] = title
        d['desc'] = desc
        if create_a_model(d):
            return redirect('myapp:home')
        else:
            return HttpResponse('Error Occured')
    return render(request, 'myapp/create.html')


def delete(request, pk):
    if delete_a_model(pk):
        return redirect('myapp:home')
    else:
        return HttpResponse('Error Occured')


def search(request):
    if request.method == 'POST':
        pk = request.POST.get('pk')
        try:
            content = read_a_model(pk)
        except:
            return render(request, 'myapp/search.html', {'error': 'No Result Found!'})
        return render(request, 'myapp/search_results.html', {'content': content})
    return render(request, 'myapp/search.html')


def update(request, pk):
    if request.method == 'POST':
        title = request.POST.get('title')
        desc = request.POST.get('desc')
        d = {}
        d['title'] = title
        d['desc'] = desc
        if update_a_model(pk, d):
            return redirect('myapp:home')
        else:
            return HttpResponse('Error Occured')
    content = read_a_model(pk)
    return render(request, 'myapp/update.html', {'content': content})
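The redirects above reference namespaced URL names such as 'myapp:home', so the app needs a matching URLconf. A possible urls.py is sketched below; the route paths, the <int:pk> converter, and the app_name value are assumptions inferred from the views, not taken from the original project.

# urls.py (hypothetical wiring for the views above)
from django.urls import path

from . import views

app_name = 'myapp'

urlpatterns = [
    path('', views.home, name='home'),
    path('create/', views.create, name='create'),
    path('delete/<int:pk>/', views.delete, name='delete'),
    path('search/', views.search, name='search'),
    path('update/<int:pk>/', views.update, name='update'),
]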
from __future__ import division  # floating point division
import csv
import random
import math
import numpy as np

import dataloader as dtl
import classalgorithms as algs


def getaccuracy(ytest, predictions):
    correct = 0
    for i in range(len(ytest)):
        if ytest[i] == predictions[i]:
            correct += 1
    return (correct / float(len(ytest))) * 100.0


def geterror(ytest, predictions):
    return (100.0 - getaccuracy(ytest, predictions))


if __name__ == '__main__':
    trainsize = 1000
    testsize = 5000
    numruns = 1

    classalgs = {'Random': algs.Classifier(),
                 #'Naive Bayes': algs.NaiveBayes({'notusecolumnones': True}),
                 #'Naive Bayes Ones': algs.NaiveBayes({'notusecolumnones': False}),
                 #'Linear Regression': algs.LinearRegressionClass(),
                 #'Logistic Regression': algs.LogitReg(),
                 #'L1 Logistic Regression': algs.LogitReg({'regularizer': 'l1'}),
                 #'L2 Logistic Regression': algs.LogitReg({'regularizer': 'l2'}),
                 'Logistic Alternative': algs.LogitRegAlternative(),
                 #'Neural Network': algs.NeuralNet({'epochs': 100, 'alpha': .01})
                 }
    numalgs = len(classalgs)

    parameters = (
        # Regularization Weight, neural network height?
        {'regwgt': 0.0, 'nh': 4},
        #{'regwgt': 0.01, 'nh': 8},
        #{'regwgt': 0.05, 'nh': 16},
        #{'regwgt': 0.1, 'nh': 32},
    )
    numparams = len(parameters)
    errors = {}
    for learnername in classalgs:
        errors[learnername] = np.zeros((numparams, numruns))

    for r in range(numruns):
        print ""
        print "**********//////////////########### Run Number : ", (r + 1), "###########\\\\\\\\\\\\\\\\\\\\\\\\\\\\*********"
        print ""
        ##
        ## Fetching Data; Put Condition Which DataSet To Run
        ##
        trainset, testset = dtl.load_susy(trainsize, testsize)
        #trainset, testset = dtl.load_susy_complete(trainsize, testsize)

        print('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0], r)

        for p in range(numparams):
            print ""
            print "********** Parameter : ", (p + 1), "**********"
            print ""
            params = parameters[p]
            for learnername, learner in classalgs.iteritems():
                # Reset learner for new parameters
                learner.reset(params)
                print "\n"
                print 'Running learner = ' + learnername + ' on parameters ' + str(learner.getparams())
                print ""
                # Train model
                learner.learn(trainset[0], trainset[1])
                # Test model
                predictions = learner.predict(testset[0])
                error = geterror(testset[1], predictions)
                print 'Error for ' + learnername + ': ' + str(error)
                errors[learnername][p, r] = error

    print ""
    print "Some More Information : "
    print ""
    for learnername, learner in classalgs.iteritems():
        besterror = np.mean(errors[learnername][0, :])
        bestparams = 0
        for p in range(numparams):
            aveerror = np.mean(errors[learnername][p, :])
            if aveerror < besterror:
                besterror = aveerror
                bestparams = p

        # Extract best parameters
        learner.reset(parameters[bestparams])
        print 'Best parameters for ' + learnername + ': ' + str(learner.getparams())
        print 'Average error for ' + learnername + ': ' + str(besterror) + ' +- ' + str(1.96 * np.std(errors[learnername][bestparams, :]) / math.sqrt(numruns))
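A small illustrative check of the two metric helpers above, not part of the original experiment script: with three of four labels matching, accuracy is 75% and error is the complement.

def _metric_sanity_check():
    # 3 of the 4 predictions match the targets -> 75.0% accuracy, 25.0% error.
    assert getaccuracy([1, 0, 1, 1], [1, 1, 1, 1]) == 75.0
    assert geterror([1, 0, 1, 1], [1, 1, 1, 1]) == 25.0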
normal
{ "blob_id": "c8ab53c77ff3646a30ca49eaafc275afeadd2ca6", "index": 9545, "step-1": "from __future__ import division # floating point division\nimport csv\nimport random\nimport math\nimport numpy as np\n\nimport dataloader as dtl\nimport classalgorithms as algs\n \n \ndef getaccuracy(ytest, predictions):\n correct = 0\n for i in range(len(ytest)):\n if ytest[i] == predictions[i]:\n correct += 1\n return (correct/float(len(ytest))) * 100.0\n\ndef geterror(ytest, predictions):\n return (100.0-getaccuracy(ytest, predictions))\n\n \nif __name__ == '__main__':\n trainsize = 1000\n testsize = 5000\n numruns = 1\n\n classalgs = {'Random': algs.Classifier(),\n #'Naive Bayes': algs.NaiveBayes({'notusecolumnones': True}),\n #'Naive Bayes Ones': algs.NaiveBayes({'notusecolumnones': False}),\n #'Linear Regression': algs.LinearRegressionClass(),\n #'Logistic Regression': algs.LogitReg(),\n #'L1 Logistic Regression': algs.LogitReg({'regularizer': 'l1'}),\n #'L2 Logistic Regression': algs.LogitReg({'regularizer': 'l2'}),\n 'Logistic Alternative': algs.LogitRegAlternative(), \n #'Neural Network': algs.NeuralNet({'epochs': 100,'alpha':.01})\n } \n numalgs = len(classalgs) \n\n parameters = (\n #Regularization Weight, neural network height?\n {'regwgt': 0.0, 'nh': 4},\n #{'regwgt': 0.01, 'nh': 8},\n #{'regwgt': 0.05, 'nh': 16},\n #{'regwgt': 0.1, 'nh': 32},\n )\n numparams = len(parameters) \n errors = {}\n for learnername in classalgs:\n errors[learnername] = np.zeros((numparams,numruns))\n \n for r in range(numruns):\n print \"\"\n print \"**********//////////////########### Run Number : \",(r+1),\"###########\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*********\"\n print \"\"\n ##\n ##Fetching Data; Put Condition Which DataSet To Run\n ##\n trainset, testset = dtl.load_susy(trainsize,testsize)\n #trainset, testset = dtl.load_susy_complete(trainsize,testsize)\n\n print('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0],r)\n\n for p in range(numparams):\n print \"\"\n print \"********** Parameter : \",(p+1),\"**********\"\n print \"\"\n params = parameters[p]\n for learnername, learner in classalgs.iteritems():\n # Reset learner for new parameters\n learner.reset(params)\n print \"\\n\"\n print 'Running learner = ' + learnername + ' on parameters ' + str(learner.getparams())\n print \"\"\n # Train model\n learner.learn(trainset[0], trainset[1])\n # Test model\n predictions = learner.predict(testset[0])\n error = geterror(testset[1], predictions)\n print 'Error for ' + learnername + ': ' + str(error)\n errors[learnername][p,r] = error\n\n\n\n print \"\"\n print \"Some More Information : \"\n print \"\"\n for learnername, learner in classalgs.iteritems():\n besterror = np.mean(errors[learnername][0,:])\n bestparams = 0\n for p in range(numparams):\n aveerror = np.mean(errors[learnername][p,:])\n if aveerror < besterror:\n besterror = aveerror\n bestparams = p\n\n # Extract best parameters \n learner.reset(parameters[bestparams])\n \tprint 'Best parameters for ' + learnername + ': ' + str(learner.getparams())\n \tprint 'Average error for ' + learnername + ': ' + str(besterror) + ' +- ' + str(1.96*np.std(errors[learnername][bestparams,:])/math.sqrt(numruns))\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
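The record above scores each learner by counting element-wise matches between predictions and labels, and reports a 95% half-width of 1.96·std/√numruns over the runs. A minimal NumPy sketch of those two helpers and the confidence computation, assuming hypothetical per-run error values (this is an illustration, not part of the record itself):

```python
import math
import numpy as np

def get_accuracy(ytest, predictions):
    # Fraction of element-wise matches, expressed as a percentage.
    ytest = np.asarray(ytest)
    predictions = np.asarray(predictions)
    return float(np.mean(ytest == predictions)) * 100.0

def get_error(ytest, predictions):
    return 100.0 - get_accuracy(ytest, predictions)

# Hypothetical per-run errors, and the 95% half-width used in the report line above.
errors = np.array([12.0, 11.5, 13.1])
half_width = 1.96 * np.std(errors) / math.sqrt(len(errors))
print(np.mean(errors), "+-", half_width)
```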
<|reserved_special_token_0|> def setup_to_transfer_learn(model): """Freeze all layers and compile the model""" for layer in model.layers: layer.trainable = False def add_new_last_layer(base_model, nb_classes): x = base_model.output x = Dropout(0.5, name='drop9')(x) x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x) x = Activation('relu', name='relu_conv10')(x) x = GlobalAveragePooling2D()(x) predictions = Activation('softmax')(x) return Model(inputs=base_model.input, outputs=predictions) def setup_to_finetune(model): for layer in model.layers[:11]: layer.trainable = False for layer in model.layers[11:]: layer.trainable = True model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss= 'categorical_crossentropy', metrics=['accuracy']) def train(args): nb_train_samples = get_nb_files(args.train_dir) nb_classes = len(glob.glob(args.train_dir + '/*')) nb_val_samples = get_nb_files(args.val_dir) nb_epoch = int(args.nb_epoch) batch_size = int(args.batch_size) steps_per_epoch = nb_train_samples / batch_size validation_steps = nb_val_samples / batch_size train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) train_generator = train_datagen.flow_from_directory(args.train_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) val_generator = test_datagen.flow_from_directory(args.val_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) base_model = SqueezeNet() setup_to_transfer_learn(base_model) model = add_new_last_layer(base_model, nb_classes) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) history_tl = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') setup_to_finetune(model) history_ft = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') model.save(args.output_model_file) if args.plot: plot_training(history_ft) def plot_training(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r.') plt.plot(epochs, val_acc, 'r') plt.title('Training and validation accuracy') plt.savefig('accuracy_plot.png') plt.close() plt.plot(epochs, loss, 'r.') plt.plot(epochs, val_loss, 'r-') plt.title('Training and validation loss') plt.savefig('loss_plot.png') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_nb_files(dir): if not os.path.exists(dir): return 0 cnt = 0 for r, dirs, files in os.walk(dir): for dr in dirs: cnt += len(glob.glob(os.path.join(r, dr + '/*'))) return cnt def setup_to_transfer_learn(model): """Freeze all layers and compile the model""" for layer in model.layers: layer.trainable = False def add_new_last_layer(base_model, nb_classes): x = base_model.output x = Dropout(0.5, name='drop9')(x) x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x) x = Activation('relu', name='relu_conv10')(x) x = GlobalAveragePooling2D()(x) predictions = Activation('softmax')(x) return Model(inputs=base_model.input, outputs=predictions) def setup_to_finetune(model): for layer in model.layers[:11]: layer.trainable = False for layer in 
model.layers[11:]: layer.trainable = True model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss= 'categorical_crossentropy', metrics=['accuracy']) def train(args): nb_train_samples = get_nb_files(args.train_dir) nb_classes = len(glob.glob(args.train_dir + '/*')) nb_val_samples = get_nb_files(args.val_dir) nb_epoch = int(args.nb_epoch) batch_size = int(args.batch_size) steps_per_epoch = nb_train_samples / batch_size validation_steps = nb_val_samples / batch_size train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) train_generator = train_datagen.flow_from_directory(args.train_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) val_generator = test_datagen.flow_from_directory(args.val_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) base_model = SqueezeNet() setup_to_transfer_learn(base_model) model = add_new_last_layer(base_model, nb_classes) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) history_tl = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') setup_to_finetune(model) history_ft = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') model.save(args.output_model_file) if args.plot: plot_training(history_ft) def plot_training(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r.') plt.plot(epochs, val_acc, 'r') plt.title('Training and validation accuracy') plt.savefig('accuracy_plot.png') plt.close() plt.plot(epochs, loss, 'r.') plt.plot(epochs, val_loss, 'r-') plt.title('Training and validation loss') plt.savefig('loss_plot.png') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_nb_files(dir): if not os.path.exists(dir): return 0 cnt = 0 for r, dirs, files in os.walk(dir): for dr in dirs: cnt += len(glob.glob(os.path.join(r, dr + '/*'))) return cnt def setup_to_transfer_learn(model): """Freeze all layers and compile the model""" for layer in model.layers: layer.trainable = False def add_new_last_layer(base_model, nb_classes): x = base_model.output x = Dropout(0.5, name='drop9')(x) x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x) x = Activation('relu', name='relu_conv10')(x) x = GlobalAveragePooling2D()(x) predictions = Activation('softmax')(x) return Model(inputs=base_model.input, outputs=predictions) def setup_to_finetune(model): for layer in model.layers[:11]: layer.trainable = False for layer in model.layers[11:]: layer.trainable = True model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss= 'categorical_crossentropy', metrics=['accuracy']) def train(args): nb_train_samples = get_nb_files(args.train_dir) nb_classes = len(glob.glob(args.train_dir + '/*')) nb_val_samples = get_nb_files(args.val_dir) nb_epoch = int(args.nb_epoch) batch_size = int(args.batch_size) steps_per_epoch = nb_train_samples / batch_size validation_steps = nb_val_samples / batch_size train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) 
train_generator = train_datagen.flow_from_directory(args.train_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) val_generator = test_datagen.flow_from_directory(args.val_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) base_model = SqueezeNet() setup_to_transfer_learn(base_model) model = add_new_last_layer(base_model, nb_classes) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) history_tl = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') setup_to_finetune(model) history_ft = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') model.save(args.output_model_file) if args.plot: plot_training(history_ft) def plot_training(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r.') plt.plot(epochs, val_acc, 'r') plt.title('Training and validation accuracy') plt.savefig('accuracy_plot.png') plt.close() plt.plot(epochs, loss, 'r.') plt.plot(epochs, val_loss, 'r-') plt.title('Training and validation loss') plt.savefig('loss_plot.png') if __name__ == '__main__': a = argparse.ArgumentParser() a.add_argument('--train_dir') a.add_argument('--val_dir') a.add_argument('--nb_epoch', default=NB_EPOCHS) a.add_argument('--batch_size', default=BAT_SIZE) a.add_argument('--output_model_file', default='inceptionv3-ft.model') a.add_argument('--plot', action='store_true') args = a.parse_args() if args.train_dir is None or args.val_dir is None: a.print_help() sys.exit(1) if not os.path.exists(args.train_dir) or not os.path.exists(args.val_dir): print('directories do not exist') sys.exit(1) train(args) <|reserved_special_token_1|> import os import sys import glob import argparse import matplotlib.pyplot as plt from keras.applications.imagenet_utils import preprocess_input from keras.models import Model from keras.layers import GlobalAveragePooling2D, Dropout, Convolution2D, Activation from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import SGD from squeezenet import fire_module, SqueezeNet IM_WIDTH, IM_HEIGHT = 227, 227 NB_EPOCHS = 3 BAT_SIZE = 32 def get_nb_files(dir): if not os.path.exists(dir): return 0 cnt = 0 for r, dirs, files in os.walk(dir): for dr in dirs: cnt += len(glob.glob(os.path.join(r, dr + '/*'))) return cnt def setup_to_transfer_learn(model): """Freeze all layers and compile the model""" for layer in model.layers: layer.trainable = False def add_new_last_layer(base_model, nb_classes): x = base_model.output x = Dropout(0.5, name='drop9')(x) x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x) x = Activation('relu', name='relu_conv10')(x) x = GlobalAveragePooling2D()(x) predictions = Activation('softmax')(x) return Model(inputs=base_model.input, outputs=predictions) def setup_to_finetune(model): for layer in model.layers[:11]: layer.trainable = False for layer in model.layers[11:]: layer.trainable = True model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss= 'categorical_crossentropy', metrics=['accuracy']) def train(args): nb_train_samples = get_nb_files(args.train_dir) nb_classes = len(glob.glob(args.train_dir + '/*')) nb_val_samples = 
get_nb_files(args.val_dir) nb_epoch = int(args.nb_epoch) batch_size = int(args.batch_size) steps_per_epoch = nb_train_samples / batch_size validation_steps = nb_val_samples / batch_size train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) train_generator = train_datagen.flow_from_directory(args.train_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) val_generator = test_datagen.flow_from_directory(args.val_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True) base_model = SqueezeNet() setup_to_transfer_learn(base_model) model = add_new_last_layer(base_model, nb_classes) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) history_tl = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') setup_to_finetune(model) history_ft = model.fit_generator(generator=train_generator, epochs= nb_epoch, steps_per_epoch=steps_per_epoch, validation_data= val_generator, validation_steps=validation_steps, class_weight='auto') model.save(args.output_model_file) if args.plot: plot_training(history_ft) def plot_training(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r.') plt.plot(epochs, val_acc, 'r') plt.title('Training and validation accuracy') plt.savefig('accuracy_plot.png') plt.close() plt.plot(epochs, loss, 'r.') plt.plot(epochs, val_loss, 'r-') plt.title('Training and validation loss') plt.savefig('loss_plot.png') if __name__ == '__main__': a = argparse.ArgumentParser() a.add_argument('--train_dir') a.add_argument('--val_dir') a.add_argument('--nb_epoch', default=NB_EPOCHS) a.add_argument('--batch_size', default=BAT_SIZE) a.add_argument('--output_model_file', default='inceptionv3-ft.model') a.add_argument('--plot', action='store_true') args = a.parse_args() if args.train_dir is None or args.val_dir is None: a.print_help() sys.exit(1) if not os.path.exists(args.train_dir) or not os.path.exists(args.val_dir): print('directories do not exist') sys.exit(1) train(args) <|reserved_special_token_1|> #adapted from https://github.com/DeepLearningSandbox/DeepLearningSandbox/tree/master/transfer_learning import os import sys import glob import argparse import matplotlib.pyplot as plt from keras.applications.imagenet_utils import preprocess_input from keras.models import Model from keras.layers import GlobalAveragePooling2D,Dropout,Convolution2D,Activation from keras.preprocessing.image import ImageDataGenerator from keras.optimizers import SGD from squeezenet import fire_module,SqueezeNet IM_WIDTH, IM_HEIGHT = 227, 227 #fixed size for squeezenet NB_EPOCHS = 3 BAT_SIZE = 32 def get_nb_files(dir): if not os.path.exists(dir): return 0 cnt = 0 for r,dirs,files in os.walk(dir): for dr in dirs: cnt += len(glob.glob(os.path.join(r,dr+"/*"))) return cnt def setup_to_transfer_learn(model): """Freeze all layers and compile the model""" for layer in model.layers: layer.trainable = False #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) def add_new_last_layer(base_model, nb_classes): x = base_model.output x = Dropout(0.5, name='drop9')(x) x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x) x = 
Activation('relu', name='relu_conv10')(x) x = GlobalAveragePooling2D()(x) predictions = Activation('softmax')(x) return Model(inputs=base_model.input, outputs=predictions) def setup_to_finetune(model): #5 layers in final output, 7 layers per fire module, finetune last 4 fire modules = 28 + 5 = 33 layers unfrozen #67 layers total, 0-indexed #layers 0-33 should be frozen, layers 34-66 trainable #layer 26 = finetune last 5 fire modules for layer in model.layers[:11]: layer.trainable=False for layer in model.layers[11:]: layer.trainable=True model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='categorical_crossentropy',metrics=['accuracy']) def train(args): nb_train_samples = get_nb_files(args.train_dir) nb_classes = len(glob.glob(args.train_dir + "/*")) nb_val_samples = get_nb_files(args.val_dir) nb_epoch = int(args.nb_epoch) batch_size = int(args.batch_size) steps_per_epoch = nb_train_samples/batch_size validation_steps = nb_val_samples/batch_size train_datagen = ImageDataGenerator( preprocessing_function=preprocess_input ) test_datagen = ImageDataGenerator( preprocessing_function=preprocess_input ) train_generator = train_datagen.flow_from_directory( args.train_dir, target_size = (IM_WIDTH,IM_HEIGHT), batch_size = batch_size, shuffle=True ) val_generator = test_datagen.flow_from_directory( args.val_dir, target_size = (IM_WIDTH,IM_HEIGHT), batch_size = batch_size, shuffle=True ) base_model = SqueezeNet() setup_to_transfer_learn(base_model) model = add_new_last_layer(base_model,nb_classes) #sgd = SGD(lr=0.001,decay=0.0002,momentum=0.9) #model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy']) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) history_tl = model.fit_generator( generator=train_generator, epochs=nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=val_generator, validation_steps = validation_steps, class_weight="auto" ) setup_to_finetune(model) history_ft = model.fit_generator( generator=train_generator, epochs=nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=val_generator, validation_steps=validation_steps, class_weight="auto" ) model.save(args.output_model_file) if args.plot: plot_training(history_ft) def plot_training(history): acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r.') plt.plot(epochs, val_acc, 'r') plt.title('Training and validation accuracy') plt.savefig("accuracy_plot.png") plt.close() plt.plot(epochs, loss, 'r.') plt.plot(epochs, val_loss, 'r-') plt.title('Training and validation loss') plt.savefig("loss_plot.png") if __name__=="__main__": a = argparse.ArgumentParser() a.add_argument("--train_dir") a.add_argument("--val_dir") a.add_argument("--nb_epoch", default=NB_EPOCHS) a.add_argument("--batch_size", default=BAT_SIZE) a.add_argument("--output_model_file", default="inceptionv3-ft.model") a.add_argument("--plot", action="store_true") args = a.parse_args() if args.train_dir is None or args.val_dir is None: a.print_help() sys.exit(1) if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)): print("directories do not exist") sys.exit(1) train(args)
flexible
{ "blob_id": "39b9106a3b0305db8cc7316be3b76e58e5577b92", "index": 4980, "step-1": "<mask token>\n\n\ndef setup_to_transfer_learn(model):\n \"\"\"Freeze all layers and compile the model\"\"\"\n for layer in model.layers:\n layer.trainable = False\n\n\ndef add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = Dropout(0.5, name='drop9')(x)\n x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x)\n x = Activation('relu', name='relu_conv10')(x)\n x = GlobalAveragePooling2D()(x)\n predictions = Activation('softmax')(x)\n return Model(inputs=base_model.input, outputs=predictions)\n\n\ndef setup_to_finetune(model):\n for layer in model.layers[:11]:\n layer.trainable = False\n for layer in model.layers[11:]:\n layer.trainable = True\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n\n\ndef train(args):\n nb_train_samples = get_nb_files(args.train_dir)\n nb_classes = len(glob.glob(args.train_dir + '/*'))\n nb_val_samples = get_nb_files(args.val_dir)\n nb_epoch = int(args.nb_epoch)\n batch_size = int(args.batch_size)\n steps_per_epoch = nb_train_samples / batch_size\n validation_steps = nb_val_samples / batch_size\n train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n train_generator = train_datagen.flow_from_directory(args.train_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n val_generator = test_datagen.flow_from_directory(args.val_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n base_model = SqueezeNet()\n setup_to_transfer_learn(base_model)\n model = add_new_last_layer(base_model, nb_classes)\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['accuracy'])\n history_tl = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n setup_to_finetune(model)\n history_ft = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n model.save(args.output_model_file)\n if args.plot:\n plot_training(history_ft)\n\n\ndef plot_training(history):\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n plt.plot(epochs, acc, 'r.')\n plt.plot(epochs, val_acc, 'r')\n plt.title('Training and validation accuracy')\n plt.savefig('accuracy_plot.png')\n plt.close()\n plt.plot(epochs, loss, 'r.')\n plt.plot(epochs, val_loss, 'r-')\n plt.title('Training and validation loss')\n plt.savefig('loss_plot.png')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_nb_files(dir):\n if not os.path.exists(dir):\n return 0\n cnt = 0\n for r, dirs, files in os.walk(dir):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r, dr + '/*')))\n return cnt\n\n\ndef setup_to_transfer_learn(model):\n \"\"\"Freeze all layers and compile the model\"\"\"\n for layer in model.layers:\n layer.trainable = False\n\n\ndef add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = Dropout(0.5, name='drop9')(x)\n x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x)\n x = Activation('relu', name='relu_conv10')(x)\n x = 
GlobalAveragePooling2D()(x)\n predictions = Activation('softmax')(x)\n return Model(inputs=base_model.input, outputs=predictions)\n\n\ndef setup_to_finetune(model):\n for layer in model.layers[:11]:\n layer.trainable = False\n for layer in model.layers[11:]:\n layer.trainable = True\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n\n\ndef train(args):\n nb_train_samples = get_nb_files(args.train_dir)\n nb_classes = len(glob.glob(args.train_dir + '/*'))\n nb_val_samples = get_nb_files(args.val_dir)\n nb_epoch = int(args.nb_epoch)\n batch_size = int(args.batch_size)\n steps_per_epoch = nb_train_samples / batch_size\n validation_steps = nb_val_samples / batch_size\n train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n train_generator = train_datagen.flow_from_directory(args.train_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n val_generator = test_datagen.flow_from_directory(args.val_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n base_model = SqueezeNet()\n setup_to_transfer_learn(base_model)\n model = add_new_last_layer(base_model, nb_classes)\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['accuracy'])\n history_tl = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n setup_to_finetune(model)\n history_ft = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n model.save(args.output_model_file)\n if args.plot:\n plot_training(history_ft)\n\n\ndef plot_training(history):\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n plt.plot(epochs, acc, 'r.')\n plt.plot(epochs, val_acc, 'r')\n plt.title('Training and validation accuracy')\n plt.savefig('accuracy_plot.png')\n plt.close()\n plt.plot(epochs, loss, 'r.')\n plt.plot(epochs, val_loss, 'r-')\n plt.title('Training and validation loss')\n plt.savefig('loss_plot.png')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_nb_files(dir):\n if not os.path.exists(dir):\n return 0\n cnt = 0\n for r, dirs, files in os.walk(dir):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r, dr + '/*')))\n return cnt\n\n\ndef setup_to_transfer_learn(model):\n \"\"\"Freeze all layers and compile the model\"\"\"\n for layer in model.layers:\n layer.trainable = False\n\n\ndef add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = Dropout(0.5, name='drop9')(x)\n x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x)\n x = Activation('relu', name='relu_conv10')(x)\n x = GlobalAveragePooling2D()(x)\n predictions = Activation('softmax')(x)\n return Model(inputs=base_model.input, outputs=predictions)\n\n\ndef setup_to_finetune(model):\n for layer in model.layers[:11]:\n layer.trainable = False\n for layer in model.layers[11:]:\n layer.trainable = True\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n\n\ndef train(args):\n nb_train_samples = get_nb_files(args.train_dir)\n nb_classes = 
len(glob.glob(args.train_dir + '/*'))\n nb_val_samples = get_nb_files(args.val_dir)\n nb_epoch = int(args.nb_epoch)\n batch_size = int(args.batch_size)\n steps_per_epoch = nb_train_samples / batch_size\n validation_steps = nb_val_samples / batch_size\n train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n train_generator = train_datagen.flow_from_directory(args.train_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n val_generator = test_datagen.flow_from_directory(args.val_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n base_model = SqueezeNet()\n setup_to_transfer_learn(base_model)\n model = add_new_last_layer(base_model, nb_classes)\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['accuracy'])\n history_tl = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n setup_to_finetune(model)\n history_ft = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n model.save(args.output_model_file)\n if args.plot:\n plot_training(history_ft)\n\n\ndef plot_training(history):\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n plt.plot(epochs, acc, 'r.')\n plt.plot(epochs, val_acc, 'r')\n plt.title('Training and validation accuracy')\n plt.savefig('accuracy_plot.png')\n plt.close()\n plt.plot(epochs, loss, 'r.')\n plt.plot(epochs, val_loss, 'r-')\n plt.title('Training and validation loss')\n plt.savefig('loss_plot.png')\n\n\nif __name__ == '__main__':\n a = argparse.ArgumentParser()\n a.add_argument('--train_dir')\n a.add_argument('--val_dir')\n a.add_argument('--nb_epoch', default=NB_EPOCHS)\n a.add_argument('--batch_size', default=BAT_SIZE)\n a.add_argument('--output_model_file', default='inceptionv3-ft.model')\n a.add_argument('--plot', action='store_true')\n args = a.parse_args()\n if args.train_dir is None or args.val_dir is None:\n a.print_help()\n sys.exit(1)\n if not os.path.exists(args.train_dir) or not os.path.exists(args.val_dir):\n print('directories do not exist')\n sys.exit(1)\n train(args)\n", "step-4": "import os\nimport sys\nimport glob\nimport argparse\nimport matplotlib.pyplot as plt\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.models import Model\nfrom keras.layers import GlobalAveragePooling2D, Dropout, Convolution2D, Activation\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\nfrom squeezenet import fire_module, SqueezeNet\nIM_WIDTH, IM_HEIGHT = 227, 227\nNB_EPOCHS = 3\nBAT_SIZE = 32\n\n\ndef get_nb_files(dir):\n if not os.path.exists(dir):\n return 0\n cnt = 0\n for r, dirs, files in os.walk(dir):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r, dr + '/*')))\n return cnt\n\n\ndef setup_to_transfer_learn(model):\n \"\"\"Freeze all layers and compile the model\"\"\"\n for layer in model.layers:\n layer.trainable = False\n\n\ndef add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = Dropout(0.5, name='drop9')(x)\n x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x)\n x = 
Activation('relu', name='relu_conv10')(x)\n x = GlobalAveragePooling2D()(x)\n predictions = Activation('softmax')(x)\n return Model(inputs=base_model.input, outputs=predictions)\n\n\ndef setup_to_finetune(model):\n for layer in model.layers[:11]:\n layer.trainable = False\n for layer in model.layers[11:]:\n layer.trainable = True\n model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n\n\ndef train(args):\n nb_train_samples = get_nb_files(args.train_dir)\n nb_classes = len(glob.glob(args.train_dir + '/*'))\n nb_val_samples = get_nb_files(args.val_dir)\n nb_epoch = int(args.nb_epoch)\n batch_size = int(args.batch_size)\n steps_per_epoch = nb_train_samples / batch_size\n validation_steps = nb_val_samples / batch_size\n train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n train_generator = train_datagen.flow_from_directory(args.train_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n val_generator = test_datagen.flow_from_directory(args.val_dir,\n target_size=(IM_WIDTH, IM_HEIGHT), batch_size=batch_size, shuffle=True)\n base_model = SqueezeNet()\n setup_to_transfer_learn(base_model)\n model = add_new_last_layer(base_model, nb_classes)\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy',\n metrics=['accuracy'])\n history_tl = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n setup_to_finetune(model)\n history_ft = model.fit_generator(generator=train_generator, epochs=\n nb_epoch, steps_per_epoch=steps_per_epoch, validation_data=\n val_generator, validation_steps=validation_steps, class_weight='auto')\n model.save(args.output_model_file)\n if args.plot:\n plot_training(history_ft)\n\n\ndef plot_training(history):\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n plt.plot(epochs, acc, 'r.')\n plt.plot(epochs, val_acc, 'r')\n plt.title('Training and validation accuracy')\n plt.savefig('accuracy_plot.png')\n plt.close()\n plt.plot(epochs, loss, 'r.')\n plt.plot(epochs, val_loss, 'r-')\n plt.title('Training and validation loss')\n plt.savefig('loss_plot.png')\n\n\nif __name__ == '__main__':\n a = argparse.ArgumentParser()\n a.add_argument('--train_dir')\n a.add_argument('--val_dir')\n a.add_argument('--nb_epoch', default=NB_EPOCHS)\n a.add_argument('--batch_size', default=BAT_SIZE)\n a.add_argument('--output_model_file', default='inceptionv3-ft.model')\n a.add_argument('--plot', action='store_true')\n args = a.parse_args()\n if args.train_dir is None or args.val_dir is None:\n a.print_help()\n sys.exit(1)\n if not os.path.exists(args.train_dir) or not os.path.exists(args.val_dir):\n print('directories do not exist')\n sys.exit(1)\n train(args)\n", "step-5": "#adapted from https://github.com/DeepLearningSandbox/DeepLearningSandbox/tree/master/transfer_learning\n\nimport os\nimport sys\nimport glob\nimport argparse\nimport matplotlib.pyplot as plt\n\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras.models import Model\nfrom keras.layers import GlobalAveragePooling2D,Dropout,Convolution2D,Activation\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import SGD\n\nfrom squeezenet 
import fire_module,SqueezeNet\n\nIM_WIDTH, IM_HEIGHT = 227, 227 #fixed size for squeezenet\nNB_EPOCHS = 3\nBAT_SIZE = 32\n\ndef get_nb_files(dir):\n if not os.path.exists(dir):\n return 0\n cnt = 0\n for r,dirs,files in os.walk(dir):\n for dr in dirs:\n cnt += len(glob.glob(os.path.join(r,dr+\"/*\")))\n return cnt\n\ndef setup_to_transfer_learn(model):\n \"\"\"Freeze all layers and compile the model\"\"\"\n for layer in model.layers:\n layer.trainable = False\n\n #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\n\ndef add_new_last_layer(base_model, nb_classes):\n x = base_model.output\n x = Dropout(0.5, name='drop9')(x)\n x = Convolution2D(nb_classes, (1, 1), padding='valid', name='conv10')(x)\n x = Activation('relu', name='relu_conv10')(x)\n x = GlobalAveragePooling2D()(x)\n predictions = Activation('softmax')(x)\n return Model(inputs=base_model.input, outputs=predictions)\n\ndef setup_to_finetune(model):\n #5 layers in final output, 7 layers per fire module, finetune last 4 fire modules = 28 + 5 = 33 layers unfrozen\n #67 layers total, 0-indexed\n #layers 0-33 should be frozen, layers 34-66 trainable\n #layer 26 = finetune last 5 fire modules\n\n for layer in model.layers[:11]:\n layer.trainable=False\n for layer in model.layers[11:]:\n layer.trainable=True\n model.compile(optimizer=SGD(lr=0.0001,momentum=0.9),loss='categorical_crossentropy',metrics=['accuracy'])\n\ndef train(args):\n\n nb_train_samples = get_nb_files(args.train_dir)\n nb_classes = len(glob.glob(args.train_dir + \"/*\"))\n nb_val_samples = get_nb_files(args.val_dir)\n nb_epoch = int(args.nb_epoch)\n batch_size = int(args.batch_size)\n steps_per_epoch = nb_train_samples/batch_size\n validation_steps = nb_val_samples/batch_size\n\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input\n )\n\n test_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input\n )\n\n train_generator = train_datagen.flow_from_directory(\n args.train_dir,\n target_size = (IM_WIDTH,IM_HEIGHT),\n batch_size = batch_size,\n shuffle=True\n )\n\n val_generator = test_datagen.flow_from_directory(\n args.val_dir,\n target_size = (IM_WIDTH,IM_HEIGHT),\n batch_size = batch_size,\n shuffle=True\n )\n\n base_model = SqueezeNet()\n setup_to_transfer_learn(base_model)\n model = add_new_last_layer(base_model,nb_classes)\n\n #sgd = SGD(lr=0.001,decay=0.0002,momentum=0.9)\n #model.compile(optimizer=sgd,loss='categorical_crossentropy',metrics=['accuracy'])\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\n\n history_tl = model.fit_generator(\n generator=train_generator,\n epochs=nb_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_data=val_generator,\n validation_steps = validation_steps,\n class_weight=\"auto\"\n )\n\n setup_to_finetune(model)\n\n history_ft = model.fit_generator(\n generator=train_generator,\n epochs=nb_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_data=val_generator,\n validation_steps=validation_steps,\n class_weight=\"auto\"\n )\n\n model.save(args.output_model_file)\n\n if args.plot:\n plot_training(history_ft)\n\n\ndef plot_training(history):\n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n epochs = range(len(acc))\n\n plt.plot(epochs, acc, 'r.')\n plt.plot(epochs, val_acc, 'r')\n plt.title('Training and validation accuracy')\n plt.savefig(\"accuracy_plot.png\")\n plt.close()\n\n plt.plot(epochs, loss, 'r.')\n 
plt.plot(epochs, val_loss, 'r-')\n plt.title('Training and validation loss')\n plt.savefig(\"loss_plot.png\")\n\nif __name__==\"__main__\":\n a = argparse.ArgumentParser()\n a.add_argument(\"--train_dir\")\n a.add_argument(\"--val_dir\")\n a.add_argument(\"--nb_epoch\", default=NB_EPOCHS)\n a.add_argument(\"--batch_size\", default=BAT_SIZE)\n a.add_argument(\"--output_model_file\", default=\"inceptionv3-ft.model\")\n a.add_argument(\"--plot\", action=\"store_true\")\n\n args = a.parse_args()\n if args.train_dir is None or args.val_dir is None:\n a.print_help()\n sys.exit(1)\n\n if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):\n print(\"directories do not exist\")\n sys.exit(1)\n\n train(args)", "step-ids": [ 5, 6, 7, 9, 10 ] }
[ 5, 6, 7, 9, 10 ]
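The record above follows a two-phase pattern: freeze every layer of the pretrained base, attach a new Conv2D(1×1) + global-average-pooling + softmax head, train with rmsprop, then unfreeze layers from index 11 onward and recompile with a small SGD learning rate. A minimal, self-contained sketch of that freeze-then-finetune pattern on a toy dense model, using tf.keras rather than the standalone keras + SqueezeNet of the record (layer names and sizes are illustrative):

```python
from tensorflow import keras
from tensorflow.keras import layers

# Toy stand-in for a pretrained base plus a new classification head.
model = keras.Sequential([
    layers.Dense(16, activation="relu", input_shape=(8,), name="base_1"),
    layers.Dense(16, activation="relu", name="base_2"),
    layers.Dense(3, activation="softmax", name="head"),
])

# Phase 1 (transfer learning): freeze the base, train only the new head.
for layer in model.layers[:-1]:
    layer.trainable = False
model.compile(optimizer="rmsprop", loss="categorical_crossentropy",
              metrics=["accuracy"])

# Phase 2 (fine-tuning): unfreeze the tail and recompile with a small SGD rate.
for layer in model.layers[-2:]:
    layer.trainable = True
model.compile(optimizer=keras.optimizers.SGD(learning_rate=1e-4, momentum=0.9),
              loss="categorical_crossentropy", metrics=["accuracy"])
```

Recompiling after toggling `trainable` matters: Keras fixes the set of trainable weights at compile time, which is why the record calls `model.compile` again inside `setup_to_finetune`.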
<|reserved_special_token_0|> class GoogleTTS: <|reserved_special_token_0|> def check_google_connection(self): try: message = 'Hallo' filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de') tts.save(filename) os.remove(filename) return True except Exception as err: logging.error('Error during Google TTS testing {}'.format(err)) return False class SapiTTS: def __init__(self): self.engine = pyttsx3.init('sapi5') rate = self.engine.getProperty('rate') self.engine.setProperty('rate', rate - 20) self.engine.setProperty('volume', 0.9) def utter_voice_message(self, message): try: self.engine.say(message) self.engine.runAndWait() return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class GoogleTTS: def utter_voice_message(self, message): try: filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de', slow=False) tts.save(filename) media = pyglet.media.load(filename, streaming=True) media.play() time.sleep(media.duration) return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None def check_google_connection(self): try: message = 'Hallo' filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de') tts.save(filename) os.remove(filename) return True except Exception as err: logging.error('Error during Google TTS testing {}'.format(err)) return False class SapiTTS: def __init__(self): self.engine = pyttsx3.init('sapi5') rate = self.engine.getProperty('rate') self.engine.setProperty('rate', rate - 20) self.engine.setProperty('volume', 0.9) def utter_voice_message(self, message): try: self.engine.say(message) self.engine.runAndWait() return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None if __name__ == '__main__': gtts = GoogleTTS() gtts.utter_voice_message('Guten Tag, mein Name ist Carina') <|reserved_special_token_1|> <|reserved_special_token_0|> ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) class GoogleTTS: def utter_voice_message(self, message): try: filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de', slow=False) tts.save(filename) media = pyglet.media.load(filename, streaming=True) media.play() time.sleep(media.duration) return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None def check_google_connection(self): try: message = 'Hallo' filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de') tts.save(filename) os.remove(filename) return True except Exception as err: logging.error('Error during Google TTS testing {}'.format(err)) return False class SapiTTS: def __init__(self): self.engine = pyttsx3.init('sapi5') rate = self.engine.getProperty('rate') self.engine.setProperty('rate', rate - 20) self.engine.setProperty('volume', 0.9) def utter_voice_message(self, message): try: self.engine.say(message) self.engine.runAndWait() return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None if __name__ == '__main__': gtts = GoogleTTS() gtts.utter_voice_message('Guten Tag, mein Name ist Carina') <|reserved_special_token_1|> import pyttsx3 import pyglet import time import logging import os from gtts import gTTS ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) class GoogleTTS: def utter_voice_message(self, message): try: filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de', slow=False) tts.save(filename) media = 
pyglet.media.load(filename, streaming=True) media.play() time.sleep(media.duration) return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None def check_google_connection(self): try: message = 'Hallo' filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de') tts.save(filename) os.remove(filename) return True except Exception as err: logging.error('Error during Google TTS testing {}'.format(err)) return False class SapiTTS: def __init__(self): self.engine = pyttsx3.init('sapi5') rate = self.engine.getProperty('rate') self.engine.setProperty('rate', rate - 20) self.engine.setProperty('volume', 0.9) def utter_voice_message(self, message): try: self.engine.say(message) self.engine.runAndWait() return 'TTS finished' except Exception as err: logging.error('Error during TTS {}'.format(err)) return None if __name__ == '__main__': gtts = GoogleTTS() gtts.utter_voice_message('Guten Tag, mein Name ist Carina') <|reserved_special_token_1|> import pyttsx3 import pyglet import time import logging import os from gtts import gTTS ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) class GoogleTTS: def utter_voice_message(self, message): try: # Google Text-to-Speech API - needs internet connectivity #filename = ROOT_DIR + '\\temp_voice.mp3' filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de', slow=False) tts.save(filename) media = pyglet.media.load(filename, streaming=True) media.play() time.sleep(media.duration) #os.remove(filename) return 'TTS finished' except Exception as err: logging.error("Error during TTS {}".format(err)) return None def check_google_connection(self): try: message = "Hallo" filename = 'temp_voice.mp3' tts = gTTS(text=message, lang='de') tts.save(filename) os.remove(filename) return True except Exception as err: logging.error("Error during Google TTS testing {}".format(err)) return False class SapiTTS: def __init__(self): # Sapi Microsoft speech engine - works offline self.engine = pyttsx3.init('sapi5') # use SAPI5 engine rate = self.engine.getProperty('rate') self.engine.setProperty('rate', rate - 20) # words per minute self.engine.setProperty('volume', 0.9) def utter_voice_message(self, message): try: self.engine.say(message) self.engine.runAndWait() return 'TTS finished' except Exception as err: logging.error("Error during TTS {}".format(err)) return None if __name__ == '__main__': gtts = GoogleTTS() gtts.utter_voice_message('Guten Tag, mein Name ist Carina')
flexible
{ "blob_id": "9ed674513bebe65ece538e9ce2b3945bb0c532cc", "index": 1357, "step-1": "<mask token>\n\n\nclass GoogleTTS:\n <mask token>\n\n def check_google_connection(self):\n try:\n message = 'Hallo'\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de')\n tts.save(filename)\n os.remove(filename)\n return True\n except Exception as err:\n logging.error('Error during Google TTS testing {}'.format(err))\n return False\n\n\nclass SapiTTS:\n\n def __init__(self):\n self.engine = pyttsx3.init('sapi5')\n rate = self.engine.getProperty('rate')\n self.engine.setProperty('rate', rate - 20)\n self.engine.setProperty('volume', 0.9)\n\n def utter_voice_message(self, message):\n try:\n self.engine.say(message)\n self.engine.runAndWait()\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass GoogleTTS:\n\n def utter_voice_message(self, message):\n try:\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de', slow=False)\n tts.save(filename)\n media = pyglet.media.load(filename, streaming=True)\n media.play()\n time.sleep(media.duration)\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n def check_google_connection(self):\n try:\n message = 'Hallo'\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de')\n tts.save(filename)\n os.remove(filename)\n return True\n except Exception as err:\n logging.error('Error during Google TTS testing {}'.format(err))\n return False\n\n\nclass SapiTTS:\n\n def __init__(self):\n self.engine = pyttsx3.init('sapi5')\n rate = self.engine.getProperty('rate')\n self.engine.setProperty('rate', rate - 20)\n self.engine.setProperty('volume', 0.9)\n\n def utter_voice_message(self, message):\n try:\n self.engine.say(message)\n self.engine.runAndWait()\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n\nif __name__ == '__main__':\n gtts = GoogleTTS()\n gtts.utter_voice_message('Guten Tag, mein Name ist Carina')\n", "step-3": "<mask token>\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass GoogleTTS:\n\n def utter_voice_message(self, message):\n try:\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de', slow=False)\n tts.save(filename)\n media = pyglet.media.load(filename, streaming=True)\n media.play()\n time.sleep(media.duration)\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n def check_google_connection(self):\n try:\n message = 'Hallo'\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de')\n tts.save(filename)\n os.remove(filename)\n return True\n except Exception as err:\n logging.error('Error during Google TTS testing {}'.format(err))\n return False\n\n\nclass SapiTTS:\n\n def __init__(self):\n self.engine = pyttsx3.init('sapi5')\n rate = self.engine.getProperty('rate')\n self.engine.setProperty('rate', rate - 20)\n self.engine.setProperty('volume', 0.9)\n\n def utter_voice_message(self, message):\n try:\n self.engine.say(message)\n self.engine.runAndWait()\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n\nif __name__ == '__main__':\n gtts = GoogleTTS()\n gtts.utter_voice_message('Guten Tag, mein Name ist Carina')\n", "step-4": "import pyttsx3\nimport pyglet\nimport time\nimport logging\nimport os\nfrom gtts 
import gTTS\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass GoogleTTS:\n\n def utter_voice_message(self, message):\n try:\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de', slow=False)\n tts.save(filename)\n media = pyglet.media.load(filename, streaming=True)\n media.play()\n time.sleep(media.duration)\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n def check_google_connection(self):\n try:\n message = 'Hallo'\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de')\n tts.save(filename)\n os.remove(filename)\n return True\n except Exception as err:\n logging.error('Error during Google TTS testing {}'.format(err))\n return False\n\n\nclass SapiTTS:\n\n def __init__(self):\n self.engine = pyttsx3.init('sapi5')\n rate = self.engine.getProperty('rate')\n self.engine.setProperty('rate', rate - 20)\n self.engine.setProperty('volume', 0.9)\n\n def utter_voice_message(self, message):\n try:\n self.engine.say(message)\n self.engine.runAndWait()\n return 'TTS finished'\n except Exception as err:\n logging.error('Error during TTS {}'.format(err))\n return None\n\n\nif __name__ == '__main__':\n gtts = GoogleTTS()\n gtts.utter_voice_message('Guten Tag, mein Name ist Carina')\n", "step-5": "import pyttsx3\nimport pyglet\nimport time\nimport logging\nimport os\n\nfrom gtts import gTTS\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass GoogleTTS:\n def utter_voice_message(self, message):\n try:\n # Google Text-to-Speech API - needs internet connectivity\n #filename = ROOT_DIR + '\\\\temp_voice.mp3'\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de', slow=False)\n tts.save(filename)\n\n media = pyglet.media.load(filename, streaming=True)\n media.play()\n time.sleep(media.duration)\n #os.remove(filename)\n\n return 'TTS finished'\n except Exception as err:\n logging.error(\"Error during TTS {}\".format(err))\n return None\n\n def check_google_connection(self):\n try:\n message = \"Hallo\"\n filename = 'temp_voice.mp3'\n tts = gTTS(text=message, lang='de')\n tts.save(filename)\n os.remove(filename)\n return True\n except Exception as err:\n logging.error(\"Error during Google TTS testing {}\".format(err))\n return False\n\n\nclass SapiTTS:\n def __init__(self):\n # Sapi Microsoft speech engine - works offline\n self.engine = pyttsx3.init('sapi5') # use SAPI5 engine\n rate = self.engine.getProperty('rate')\n self.engine.setProperty('rate', rate - 20) # words per minute\n self.engine.setProperty('volume', 0.9)\n\n def utter_voice_message(self, message):\n try:\n self.engine.say(message)\n self.engine.runAndWait()\n\n return 'TTS finished'\n except Exception as err:\n logging.error(\"Error during TTS {}\".format(err))\n return None\n\n\nif __name__ == '__main__':\n gtts = GoogleTTS()\n gtts.utter_voice_message('Guten Tag, mein Name ist Carina')\n\n", "step-ids": [ 5, 7, 8, 9, 10 ] }
[ 5, 7, 8, 9, 10 ]
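The record above wraps two engines: gTTS, which needs connectivity and writes an MP3 that is then played back with pyglet, and the offline SAPI5 engine through pyttsx3. A minimal offline sketch using only the pyttsx3 calls that appear in the record (the default driver is used here instead of pinning 'sapi5', and the rate offset is illustrative):

```python
import pyttsx3

engine = pyttsx3.init()              # the record pins 'sapi5' on Windows
rate = engine.getProperty('rate')    # words per minute
engine.setProperty('rate', rate - 20)
engine.setProperty('volume', 0.9)
engine.say('Guten Tag, mein Name ist Carina')
engine.runAndWait()                  # blocks until the utterance finishes
```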
import os import json from .utils import * def _unique_predict(solve_list): valid_solve_list = filter(lambda x: x[0] is not None, solve_list) valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0]) unique_solve_list = list() current_no = -1 for e in valid_solve_list: if current_no != e[0]: current_no = e[0] unique_solve_list.append(e) return unique_solve_list @safe_one_retval_wrapper def _analysis_data(answer_root, kind, result): if result["pass"] != 1: result["score"] = -1 raise Exception(result['message']) predict_suites = result["predict_suites"] total = 0 correct = 0 # unique predict suites for suite in predict_suites: with open(os.path.join(answer_root, suite + ".answer.json"), "r", encoding="utf-8") as fh: answer_dict = json.load(fh) # get unique solve list by id (the first element) solve_list = _unique_predict(predict_suites[suite]) total = total + len(answer_dict) for q in solve_list: if q[1] == answer_dict[str(q[0])]['answer']: correct = correct + 1 total = total if total else 1 return correct / total def analysis_data(answer_root, kind, result): if result.get('pass') == -1: return {"pass": -1, "score": -1, "message": None} message, score = _analysis_data(answer_root, kind, result) if message is None: return {"pass": 1, "score": score, "message": message} return {"pass": 0, "score": -1, "message": message} @safe_one_retval_wrapper def _run_analysis(data_root, work_root, answer_root): with open(os.path.join(data_root, "config.json"), "r", encoding="utf-8") as fh: config = json.load(fh) predict_file = os.path.join(work_root, "output.answer.json") with open(predict_file, "r", encoding="utf-8") as fh: predict = json.load(fh) analysis_result = {} for kind, result in predict.items(): analysis_result[kind] = analysis_data(answer_root, kind, result) path = os.path.join(work_root, "result.json") with open(path, "w", encoding="utf-8") as fh: json.dump(analysis_result, fh, ensure_ascii=False) return True def run_analysis(data_root, work_root, answer_root): msg, code = _run_analysis(data_root, work_root, answer_root) result_file = os.path.join(work_root, "result.json") if msg is None: print("Succ:output to %s" % result_file) else: with open(result_file, "w", encoding="utf-8") as fh: fh.write(msg) print("Fail:output to %s" % result_file) return msg, code
normal
{ "blob_id": "00a1b5f20f15994a659eda56201ba7c45d49a4db", "index": 4186, "step-1": "<mask token>\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n<mask token>\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n", "step-2": "<mask token>\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result['pass'] != 1:\n result['score'] = -1\n raise Exception(result['message'])\n predict_suites = result['predict_suites']\n total = 0\n correct = 0\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + '.answer.json'), 'r',\n encoding='utf-8') as fh:\n answer_dict = json.load(fh)\n solve_list = _unique_predict(predict_suites[suite])\n total = total + len(answer_dict)\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\n<mask token>\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n", "step-3": "<mask token>\n\n\ndef 
_unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result['pass'] != 1:\n result['score'] = -1\n raise Exception(result['message'])\n predict_suites = result['predict_suites']\n total = 0\n correct = 0\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + '.answer.json'), 'r',\n encoding='utf-8') as fh:\n answer_dict = json.load(fh)\n solve_list = _unique_predict(predict_suites[suite])\n total = total + len(answer_dict)\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\ndef analysis_data(answer_root, kind, result):\n if result.get('pass') == -1:\n return {'pass': -1, 'score': -1, 'message': None}\n message, score = _analysis_data(answer_root, kind, result)\n if message is None:\n return {'pass': 1, 'score': score, 'message': message}\n return {'pass': 0, 'score': -1, 'message': message}\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n", "step-4": "import os\nimport json\nfrom .utils import *\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result['pass'] != 1:\n result['score'] = -1\n raise Exception(result['message'])\n predict_suites = result['predict_suites']\n total = 0\n correct = 0\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + '.answer.json'), 'r',\n encoding='utf-8') as fh:\n answer_dict = json.load(fh)\n solve_list = _unique_predict(predict_suites[suite])\n total = total + len(answer_dict)\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\ndef analysis_data(answer_root, kind, result):\n if result.get('pass') == -1:\n return {'pass': -1, 'score': -1, 'message': None}\n message, score = _analysis_data(answer_root, kind, result)\n if 
message is None:\n return {'pass': 1, 'score': score, 'message': message}\n return {'pass': 0, 'score': -1, 'message': message}\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n", "step-5": "import os\nimport json\nfrom .utils import *\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result[\"pass\"] != 1:\n result[\"score\"] = -1\n raise Exception(result['message'])\n\n predict_suites = result[\"predict_suites\"]\n total = 0\n correct = 0\n\n # unique predict suites\n\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + \".answer.json\"), \"r\", encoding=\"utf-8\") as fh:\n answer_dict = json.load(fh)\n # get unique solve list by id (the first element)\n solve_list = _unique_predict(predict_suites[suite])\n\n total = total + len(answer_dict)\n\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\ndef analysis_data(answer_root, kind, result):\n if result.get('pass') == -1:\n return {\"pass\": -1, \"score\": -1, \"message\": None}\n\n message, score = _analysis_data(answer_root, kind, result)\n if message is None:\n return {\"pass\": 1, \"score\": score, \"message\": message}\n\n return {\"pass\": 0, \"score\": -1, \"message\": message}\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, \"config.json\"), \"r\", encoding=\"utf-8\") as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, \"output.answer.json\")\n with open(predict_file, \"r\", encoding=\"utf-8\") as fh:\n predict = json.load(fh)\n\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, \"result.json\")\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, \"result.json\")\n if msg is None:\n print(\"Succ:output to %s\" % result_file)\n else:\n with open(result_file, 
\"w\", encoding=\"utf-8\") as fh:\n fh.write(msg)\n\n print(\"Fail:output to %s\" % result_file)\n return msg, code\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
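A minimal usage sketch for the _unique_predict helper shown in the record above, assuming that function is in scope and that predictions are (question_no, answer) pairs in which the question number may be None or may repeat:

# Hypothetical input: question 2 answered twice, plus one entry with no parsed number.
solve_list = [(2, 'A'), (None, 'B'), (1, 'C'), (2, 'D')]
# None ids are dropped, entries are sorted by question number, and only the
# first answer per question is kept, so this prints [(1, 'C'), (2, 'A')].
print(_unique_predict(solve_list))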
{ # Theme information 'name' : 'Clarico CMS Blocks', 'category' : 'Website', 'version' : '1.0', 'summary': '13 CMS Building Blocks', 'description': """""", # Dependencies 'depends': [ 'snippet_style_1', 'snippet_style_2', 'snippet_style_3', 'snippet_style_4', 'snippet_style_5', 'snippet_style_6', 'snippet_style_7', 'snippet_style_8', 'snippet_style_9', 'snippet_style_10', 'snippet_style_11', 'snippet_style_12', 'snippet_style_13', ], # Author 'author': 'Emipro Technologies Pvt. Ltd.', 'website': 'http://www.emiprotechnologies.com', # Technical 'installable': True, }
normal
{ "blob_id": "34f98d4a6a15c9a7b42f237cab204b736dc97136", "index": 1372, "step-1": "<mask token>\n", "step-2": "{'name': 'Clarico CMS Blocks', 'category': 'Website', 'version': '1.0',\n 'summary': '13 CMS Building Blocks', 'description': '', 'depends': [\n 'snippet_style_1', 'snippet_style_2', 'snippet_style_3',\n 'snippet_style_4', 'snippet_style_5', 'snippet_style_6',\n 'snippet_style_7', 'snippet_style_8', 'snippet_style_9',\n 'snippet_style_10', 'snippet_style_11', 'snippet_style_12',\n 'snippet_style_13'], 'author': 'Emipro Technologies Pvt. Ltd.',\n 'website': 'http://www.emiprotechnologies.com', 'installable': True}\n", "step-3": "{\n # Theme information\n 'name' : 'Clarico CMS Blocks',\n 'category' : 'Website',\n 'version' : '1.0',\n 'summary': '13 CMS Building Blocks',\n 'description': \"\"\"\"\"\",\n\n # Dependencies\n 'depends': [\n\t 'snippet_style_1',\n 'snippet_style_2',\n 'snippet_style_3',\n 'snippet_style_4',\n 'snippet_style_5',\n 'snippet_style_6',\n 'snippet_style_7',\n 'snippet_style_8',\n 'snippet_style_9',\n 'snippet_style_10',\n 'snippet_style_11',\n 'snippet_style_12',\n 'snippet_style_13',\n\t\n ],\n\n\n # Author\n 'author': 'Emipro Technologies Pvt. Ltd.',\n 'website': 'http://www.emiprotechnologies.com',\n\n # Technical\n 'installable': True,\n}\n\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
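A small sanity-check sketch for the Odoo manifest dict in the record above; the manifest file name is an assumption, and ast.literal_eval works here because the file is a plain dict literal:

import ast

# '__openerp__.py' is a guessed file name for this module's manifest.
with open('__openerp__.py', 'r') as fh:
    manifest = ast.literal_eval(fh.read())

# The summary advertises 13 CMS building blocks; the depends list should match.
assert len(manifest['depends']) == 13
assert manifest['installable'] is True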
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class GlobalTestOpenAcademySession(TransactionCase): <|reserved_special_token_0|> def setUp(self): super(GlobalTestOpenAcademySession, self).setUp() self.session = self.env['openacademy.session'] self.partner_vauxoo = self.env.ref('base.res_partner_23') self.course_id = self.env.ref('openacademy.course3') self.partner_attende = self.env.ref('base.res_partner_5') def test_05_instructor_is_attendee(self): """ Check raise: "A session's instructor can't be an attendee" """ with self.assertRaisesRegexp(ValidationError, "A session's instructor can't be an attendee"): self.session.create({'name': 'Session Test 1', 'seats': 1, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_vauxoo.id])], 'course_id': self.course_id.id}) def test_10_wkf_done(self): """ Check that workflow work fine! """ session_test = self.session.create({'name': 'Session Test 2', 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_attende.id])], 'course_id': self. course_id.id}) self.assertEqual(session_test.state, 'draft', 'Initial state should be in draft') session_test.signal_workflow('button_confirm') self.assertEqual(session_test.state, 'confirmed', "Signal Confirm don't work") session_test.signal_workflow('button_done') self.assertEqual(session_test.state, 'done', "Signal Done don't work") <|reserved_special_token_1|> <|reserved_special_token_0|> class GlobalTestOpenAcademySession(TransactionCase): """ Global Test to openacademy session model. Test create session and trigger constraint """ def setUp(self): super(GlobalTestOpenAcademySession, self).setUp() self.session = self.env['openacademy.session'] self.partner_vauxoo = self.env.ref('base.res_partner_23') self.course_id = self.env.ref('openacademy.course3') self.partner_attende = self.env.ref('base.res_partner_5') def test_05_instructor_is_attendee(self): """ Check raise: "A session's instructor can't be an attendee" """ with self.assertRaisesRegexp(ValidationError, "A session's instructor can't be an attendee"): self.session.create({'name': 'Session Test 1', 'seats': 1, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_vauxoo.id])], 'course_id': self.course_id.id}) def test_10_wkf_done(self): """ Check that workflow work fine! """ session_test = self.session.create({'name': 'Session Test 2', 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_attende.id])], 'course_id': self. course_id.id}) self.assertEqual(session_test.state, 'draft', 'Initial state should be in draft') session_test.signal_workflow('button_confirm') self.assertEqual(session_test.state, 'confirmed', "Signal Confirm don't work") session_test.signal_workflow('button_done') self.assertEqual(session_test.state, 'done', "Signal Done don't work") <|reserved_special_token_1|> from openerp.tests.common import TransactionCase from openerp.exceptions import ValidationError class GlobalTestOpenAcademySession(TransactionCase): """ Global Test to openacademy session model. 
Test create session and trigger constraint """ def setUp(self): super(GlobalTestOpenAcademySession, self).setUp() self.session = self.env['openacademy.session'] self.partner_vauxoo = self.env.ref('base.res_partner_23') self.course_id = self.env.ref('openacademy.course3') self.partner_attende = self.env.ref('base.res_partner_5') def test_05_instructor_is_attendee(self): """ Check raise: "A session's instructor can't be an attendee" """ with self.assertRaisesRegexp(ValidationError, "A session's instructor can't be an attendee"): self.session.create({'name': 'Session Test 1', 'seats': 1, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_vauxoo.id])], 'course_id': self.course_id.id}) def test_10_wkf_done(self): """ Check that workflow work fine! """ session_test = self.session.create({'name': 'Session Test 2', 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_attende.id])], 'course_id': self. course_id.id}) self.assertEqual(session_test.state, 'draft', 'Initial state should be in draft') session_test.signal_workflow('button_confirm') self.assertEqual(session_test.state, 'confirmed', "Signal Confirm don't work") session_test.signal_workflow('button_done') self.assertEqual(session_test.state, 'done', "Signal Done don't work") <|reserved_special_token_1|> # -*- encoding: utf-8 -*- from openerp.tests.common import TransactionCase from openerp.exceptions import ValidationError class GlobalTestOpenAcademySession(TransactionCase): ''' Global Test to openacademy session model. Test create session and trigger constraint ''' # Pseudo-constructor methods def setUp(self): # Define Global Variable to tests methods super(GlobalTestOpenAcademySession, self).setUp() self.session = self.env['openacademy.session'] self.partner_vauxoo = self.env.ref('base.res_partner_23') self.course_id = self.env.ref('openacademy.course3') self.partner_attende = self.env.ref('base.res_partner_5') # Generic Methods # Test Methods def test_05_instructor_is_attendee(self): ''' Check raise: "A session's instructor can't be an attendee" ''' with self.assertRaisesRegexp( ValidationError, "A session's instructor can't be an attendee"): self.session.create({ 'name': 'Session Test 1', 'seats': 1, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_vauxoo.id])], 'course_id': self.course_id.id }) def test_10_wkf_done(self): ''' Check that workflow work fine! ''' session_test = self.session.create({ 'name': 'Session Test 2', 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0, [self.partner_attende.id])], 'course_id': self.course_id.id }) # Check Initial State self.assertEqual(session_test.state, 'draft', 'Initial state should ' 'be in draft') # Check next state an check it session_test.signal_workflow('button_confirm') self.assertEqual(session_test.state, 'confirmed', "Signal Confirm " "don't work") # Check next state an check it session_test.signal_workflow('button_done') self.assertEqual(session_test.state, 'done', "Signal Done don't work") # self.env.cr.commit() Only for test data generated for test. # Please don't use
flexible
{ "blob_id": "7edd833103e1de92e57559c8a75379c26266963b", "index": 7835, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass GlobalTestOpenAcademySession(TransactionCase):\n <mask token>\n\n def setUp(self):\n super(GlobalTestOpenAcademySession, self).setUp()\n self.session = self.env['openacademy.session']\n self.partner_vauxoo = self.env.ref('base.res_partner_23')\n self.course_id = self.env.ref('openacademy.course3')\n self.partner_attende = self.env.ref('base.res_partner_5')\n\n def test_05_instructor_is_attendee(self):\n \"\"\"\n Check raise: \"A session's instructor can't be an attendee\"\n \"\"\"\n with self.assertRaisesRegexp(ValidationError,\n \"A session's instructor can't be an attendee\"):\n self.session.create({'name': 'Session Test 1', 'seats': 1,\n 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0,\n [self.partner_vauxoo.id])], 'course_id': self.course_id.id})\n\n def test_10_wkf_done(self):\n \"\"\"\n Check that workflow work fine!\n \"\"\"\n session_test = self.session.create({'name': 'Session Test 2',\n 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids':\n [(6, 0, [self.partner_attende.id])], 'course_id': self.\n course_id.id})\n self.assertEqual(session_test.state, 'draft',\n 'Initial state should be in draft')\n session_test.signal_workflow('button_confirm')\n self.assertEqual(session_test.state, 'confirmed',\n \"Signal Confirm don't work\")\n session_test.signal_workflow('button_done')\n self.assertEqual(session_test.state, 'done', \"Signal Done don't work\")\n", "step-3": "<mask token>\n\n\nclass GlobalTestOpenAcademySession(TransactionCase):\n \"\"\"\n Global Test to openacademy session model.\n Test create session and trigger constraint\n \"\"\"\n\n def setUp(self):\n super(GlobalTestOpenAcademySession, self).setUp()\n self.session = self.env['openacademy.session']\n self.partner_vauxoo = self.env.ref('base.res_partner_23')\n self.course_id = self.env.ref('openacademy.course3')\n self.partner_attende = self.env.ref('base.res_partner_5')\n\n def test_05_instructor_is_attendee(self):\n \"\"\"\n Check raise: \"A session's instructor can't be an attendee\"\n \"\"\"\n with self.assertRaisesRegexp(ValidationError,\n \"A session's instructor can't be an attendee\"):\n self.session.create({'name': 'Session Test 1', 'seats': 1,\n 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0,\n [self.partner_vauxoo.id])], 'course_id': self.course_id.id})\n\n def test_10_wkf_done(self):\n \"\"\"\n Check that workflow work fine!\n \"\"\"\n session_test = self.session.create({'name': 'Session Test 2',\n 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids':\n [(6, 0, [self.partner_attende.id])], 'course_id': self.\n course_id.id})\n self.assertEqual(session_test.state, 'draft',\n 'Initial state should be in draft')\n session_test.signal_workflow('button_confirm')\n self.assertEqual(session_test.state, 'confirmed',\n \"Signal Confirm don't work\")\n session_test.signal_workflow('button_done')\n self.assertEqual(session_test.state, 'done', \"Signal Done don't work\")\n", "step-4": "from openerp.tests.common import TransactionCase\nfrom openerp.exceptions import ValidationError\n\n\nclass GlobalTestOpenAcademySession(TransactionCase):\n \"\"\"\n Global Test to openacademy session model.\n Test create session and trigger constraint\n \"\"\"\n\n def setUp(self):\n super(GlobalTestOpenAcademySession, self).setUp()\n self.session = self.env['openacademy.session']\n self.partner_vauxoo = self.env.ref('base.res_partner_23')\n self.course_id = 
self.env.ref('openacademy.course3')\n self.partner_attende = self.env.ref('base.res_partner_5')\n\n def test_05_instructor_is_attendee(self):\n \"\"\"\n Check raise: \"A session's instructor can't be an attendee\"\n \"\"\"\n with self.assertRaisesRegexp(ValidationError,\n \"A session's instructor can't be an attendee\"):\n self.session.create({'name': 'Session Test 1', 'seats': 1,\n 'user_id': self.partner_vauxoo.id, 'attendee_ids': [(6, 0,\n [self.partner_vauxoo.id])], 'course_id': self.course_id.id})\n\n def test_10_wkf_done(self):\n \"\"\"\n Check that workflow work fine!\n \"\"\"\n session_test = self.session.create({'name': 'Session Test 2',\n 'seats': 2, 'user_id': self.partner_vauxoo.id, 'attendee_ids':\n [(6, 0, [self.partner_attende.id])], 'course_id': self.\n course_id.id})\n self.assertEqual(session_test.state, 'draft',\n 'Initial state should be in draft')\n session_test.signal_workflow('button_confirm')\n self.assertEqual(session_test.state, 'confirmed',\n \"Signal Confirm don't work\")\n session_test.signal_workflow('button_done')\n self.assertEqual(session_test.state, 'done', \"Signal Done don't work\")\n", "step-5": "# -*- encoding: utf-8 -*-\n\nfrom openerp.tests.common import TransactionCase\nfrom openerp.exceptions import ValidationError\n\n\nclass GlobalTestOpenAcademySession(TransactionCase):\n '''\n Global Test to openacademy session model.\n Test create session and trigger constraint\n '''\n\n # Pseudo-constructor methods\n def setUp(self):\n # Define Global Variable to tests methods\n super(GlobalTestOpenAcademySession, self).setUp()\n self.session = self.env['openacademy.session']\n self.partner_vauxoo = self.env.ref('base.res_partner_23')\n self.course_id = self.env.ref('openacademy.course3')\n self.partner_attende = self.env.ref('base.res_partner_5')\n\n # Generic Methods\n\n # Test Methods\n def test_05_instructor_is_attendee(self):\n '''\n Check raise: \"A session's instructor can't be an attendee\"\n '''\n with self.assertRaisesRegexp(\n ValidationError,\n \"A session's instructor can't be an attendee\"):\n self.session.create({\n 'name': 'Session Test 1',\n 'seats': 1,\n 'user_id': self.partner_vauxoo.id,\n 'attendee_ids': [(6, 0, [self.partner_vauxoo.id])],\n 'course_id': self.course_id.id\n })\n\n def test_10_wkf_done(self):\n '''\n Check that workflow work fine!\n '''\n session_test = self.session.create({\n 'name': 'Session Test 2',\n 'seats': 2,\n 'user_id': self.partner_vauxoo.id,\n 'attendee_ids': [(6, 0, [self.partner_attende.id])],\n 'course_id': self.course_id.id\n })\n # Check Initial State\n self.assertEqual(session_test.state, 'draft', 'Initial state should '\n 'be in draft')\n # Check next state an check it\n session_test.signal_workflow('button_confirm')\n self.assertEqual(session_test.state, 'confirmed', \"Signal Confirm \"\n \"don't work\")\n # Check next state an check it\n session_test.signal_workflow('button_done')\n self.assertEqual(session_test.state, 'done', \"Signal Done don't work\")\n # self.env.cr.commit() Only for test data generated for test.\n # Please don't use\n", "step-ids": [ 0, 4, 5, 6, 7 ] }
[ 0, 4, 5, 6, 7 ]
import os def is_admin(): """ The function ``is_admin`` detects whether the calling process is running with administrator/superuser privileges. It works cross-platform on either Windows NT systems or Unix-based systems. """ if os.name == 'nt': try: # Only Windows users with admin privileges can read # the C:\windows\temp directory. os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\windows'),'temp'])) except: return False else: return True else: # Root has UID 0 on Unix systems. if 'SUDO_USER' in os.environ and os.geteuid() == 0: return True else: return False
normal
{ "blob_id": "f1601d3d820b93631f9b1358627a5716016ad135", "index": 5473, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef is_admin():\n \"\"\"\n The function ``is_admin`` detects whether the calling process is running\n with administrator/superuser privileges. It works cross-platform on \n either Windows NT systems or Unix-based systems.\n \"\"\"\n if os.name == 'nt':\n try:\n os.listdir(os.sep.join([os.environ.get('SystemRoot',\n 'C:\\\\windows'), 'temp']))\n except:\n return False\n else:\n return True\n elif 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False\n", "step-3": "import os\n\n\ndef is_admin():\n \"\"\"\n The function ``is_admin`` detects whether the calling process is running\n with administrator/superuser privileges. It works cross-platform on \n either Windows NT systems or Unix-based systems.\n \"\"\"\n if os.name == 'nt':\n try:\n os.listdir(os.sep.join([os.environ.get('SystemRoot',\n 'C:\\\\windows'), 'temp']))\n except:\n return False\n else:\n return True\n elif 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False\n", "step-4": "import os\n\n\ndef is_admin():\n \"\"\"\n The function ``is_admin`` detects whether the calling process is running\n with administrator/superuser privileges. It works cross-platform on \n either Windows NT systems or Unix-based systems.\n \"\"\"\n if os.name == 'nt':\n try:\n # Only Windows users with admin privileges can read \n # the C:\\windows\\temp directory.\n os.listdir(os.sep.join([os.environ.get('SystemRoot','C:\\\\windows'),'temp']))\n except:\n return False\n else:\n return True\n else:\n # Root has UID 0 on Unix systems.\n if 'SUDO_USER' in os.environ and os.geteuid() == 0:\n return True\n else:\n return False\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
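A minimal usage sketch for the is_admin helper in the record above, assuming it is importable; note that on Unix the check requires both SUDO_USER in the environment and an effective UID of 0, so a plain root login without sudo reports False:

# Guard an operation that needs elevated rights.
if is_admin():
    print('Running with administrator/root privileges.')
else:
    print('Insufficient privileges; re-run as administrator or via sudo.')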
''' Temperature Container ''' class TempHolder: range_start = 0 range_end = 0 star_count_lst = [0,0,0,0,0,0] counter = 0 def __init__(self, in_range_start, in_range_end): self.range_start = in_range_start self.range_end = in_range_end self.counter = 0 self.star_count_lst = [0,0,0,0,0,0] def is_in_temp_range(self, temp): if self.range_start <= temp and temp < self.range_end: return True else: return False def add_rating(self, rating): if int(rating) == 0: self.star_count_lst[0] += 1 if int(rating) == 1: self.star_count_lst[1] += 1 if int(rating) == 2: self.star_count_lst[2] += 1 if int(rating) == 3: self.star_count_lst[3] += 1 if int(rating) == 4: self.star_count_lst[4] += 1 if int(rating) == 5: self.star_count_lst[5] += 1 self.counter += 1 def __str__(self): return_str = "" return_str += "Temp: " + str(self.range_start) + "-" + str(self.range_end) + "\n" return_str += "Count: " + str(self.counter) + "\n" if self.star_count_lst[0] == 0: return_str += "0 Stars: 0.00%\n" else: return_str += "0 Stars: " + str(round((self.star_count_lst[0] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[1] == 0: return_str += "1 Stars: 0.00%\n" else: return_str += "1 Stars: " + str(round((self.star_count_lst[1] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[2] == 0: return_str += "2 Stars: 0.00%\n" else: return_str += "2 Stars: " + str(round((self.star_count_lst[2] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[3] == 0: return_str += "3 Stars: 0.00%\n" else: return_str += "3 Stars: " + str(round((self.star_count_lst[3] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[4] == 0: return_str += "4 Stars: 0.00%\n" else: return_str += "4 Stars: " + str(round((self.star_count_lst[4] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[5] == 0: return_str += "5 Stars: 0.00%\n" else: return_str += "5 Stars: " + str(round((self.star_count_lst[5] / (self.counter * 1.0)), 4) * 100) + "%\n" return return_str class TempAnalysis: temp_holder_lst = list() def __init__(self): temp_counter = 0 while temp_counter < 110: self.temp_holder_lst.append(TempHolder(temp_counter, temp_counter + 10)) temp_counter += 10 def add_rating(self, rating, temp): for temp_holder in self.temp_holder_lst: if temp_holder.is_in_temp_range(temp): temp_holder.add_rating(rating) return True return False def __str__(self): return_str = "Breakdown by Temperature:\n" return_str += "-------------------------\n" for temp_holder in self.temp_holder_lst: return_str += str(temp_holder) + "\n" return return_str ''' Temperature Container ''' class FRSHTTHolder: frshtt_code = "" star_count_lst = [0,0,0,0,0,0] counter = 0 def __init__(self, in_frshtt_code): self.frshtt_code = in_frshtt_code self.counter = 0 self.star_count_lst = [0,0,0,0,0,0] def is_in_code(self, in_frshtt_code): if self.frshtt_code == in_frshtt_code: return True else: return False def add_rating(self, rating): if int(rating) == 0: self.star_count_lst[0] += 1 if int(rating) == 1: self.star_count_lst[1] += 1 if int(rating) == 2: self.star_count_lst[2] += 1 if int(rating) == 3: self.star_count_lst[3] += 1 if int(rating) == 4: self.star_count_lst[4] += 1 if int(rating) == 5: self.star_count_lst[5] += 1 self.counter += 1 def __str__(self): return_str = "" return_str += "Code: " + str(self.frshtt_code) + "\n" return_str += "Count: " + str(self.counter) + "\n" if self.star_count_lst[0] == 0: return_str += "0 Stars: 0.00%\n" else: return_str += "0 Stars: " + str(round((self.star_count_lst[0] / (self.counter * 1.0)), 4) * 
100) + "%\n" if self.star_count_lst[1] == 0: return_str += "1 Stars: 0.00%\n" else: return_str += "1 Stars: " + str(round((self.star_count_lst[1] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[2] == 0: return_str += "2 Stars: 0.00%\n" else: return_str += "2 Stars: " + str(round((self.star_count_lst[2] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[3] == 0: return_str += "3 Stars: 0.00%\n" else: return_str += "3 Stars: " + str(round((self.star_count_lst[3] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[4] == 0: return_str += "4 Stars: 0.00%\n" else: return_str += "4 Stars: " + str(round((self.star_count_lst[4] / (self.counter * 1.0)), 4) * 100) + "%\n" if self.star_count_lst[5] == 0: return_str += "5 Stars: 0.00%\n" else: return_str += "5 Stars: " + str(round((self.star_count_lst[5] / (self.counter * 1.0)), 4) * 100) + "%\n" return return_str class FRSHTTAnalysis: frshtt_holder_lst = list() def __init__(self): # no weather self.frshtt_holder_lst.append(FRSHTTHolder("000000")) # rain self.frshtt_holder_lst.append(FRSHTTHolder("010000")) # thunder strom self.frshtt_holder_lst.append(FRSHTTHolder("010010")) # fog self.frshtt_holder_lst.append(FRSHTTHolder("100000")) # snow self.frshtt_holder_lst.append(FRSHTTHolder("001000")) # mixed (snow/rain) self.frshtt_holder_lst.append(FRSHTTHolder("011000")) # dry thunder self.frshtt_holder_lst.append(FRSHTTHolder("000010")) # hail self.frshtt_holder_lst.append(FRSHTTHolder("000100")) def add_rating(self, rating, frshtt_code): for frshtt_holder in self.frshtt_holder_lst: if frshtt_holder.is_in_code(frshtt_code): frshtt_holder.add_rating(rating) return True return False def __str__(self): return_str = "Breakdown by Code:\n" return_str += "-------------------------\n" for frshtt_holder in self.frshtt_holder_lst: return_str += str(frshtt_holder) + "\n" return return_str
normal
{ "blob_id": "330b843501e0fdaff21cc4eff1ef930d54ab6e8d", "index": 747, "step-1": "<mask token>\n\n\nclass FRSHTTHolder:\n frshtt_code = ''\n star_count_lst = [0, 0, 0, 0, 0, 0]\n counter = 0\n\n def __init__(self, in_frshtt_code):\n self.frshtt_code = in_frshtt_code\n self.counter = 0\n self.star_count_lst = [0, 0, 0, 0, 0, 0]\n\n def is_in_code(self, in_frshtt_code):\n if self.frshtt_code == in_frshtt_code:\n return True\n else:\n return False\n\n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n self.counter += 1\n\n def __str__(self):\n return_str = ''\n return_str += 'Code: ' + str(self.frshtt_code) + '\\n'\n return_str += 'Count: ' + str(self.counter) + '\\n'\n if self.star_count_lst[0] == 0:\n return_str += '0 Stars: 0.00%\\n'\n else:\n return_str += '0 Stars: ' + str(round(self.star_count_lst[0] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[1] == 0:\n return_str += '1 Stars: 0.00%\\n'\n else:\n return_str += '1 Stars: ' + str(round(self.star_count_lst[1] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[2] == 0:\n return_str += '2 Stars: 0.00%\\n'\n else:\n return_str += '2 Stars: ' + str(round(self.star_count_lst[2] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[3] == 0:\n return_str += '3 Stars: 0.00%\\n'\n else:\n return_str += '3 Stars: ' + str(round(self.star_count_lst[3] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[4] == 0:\n return_str += '4 Stars: 0.00%\\n'\n else:\n return_str += '4 Stars: ' + str(round(self.star_count_lst[4] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[5] == 0:\n return_str += '5 Stars: 0.00%\\n'\n else:\n return_str += '5 Stars: ' + str(round(self.star_count_lst[5] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n return return_str\n\n\nclass FRSHTTAnalysis:\n frshtt_holder_lst = list()\n\n def __init__(self):\n self.frshtt_holder_lst.append(FRSHTTHolder('000000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('100000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('001000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('011000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000100'))\n\n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Code:\\n'\n return_str += '-------------------------\\n'\n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + '\\n'\n return return_str\n", "step-2": "<mask token>\n\n\nclass TempAnalysis:\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return_str = 'Breakdown by Temperature:\\n'\n return_str += '-------------------------\\n'\n for temp_holder in self.temp_holder_lst:\n return_str += str(temp_holder) + '\\n'\n return return_str\n\n\n<mask token>\n\n\nclass FRSHTTHolder:\n frshtt_code = ''\n star_count_lst = [0, 0, 0, 0, 0, 0]\n counter = 0\n\n def __init__(self, in_frshtt_code):\n self.frshtt_code = 
in_frshtt_code\n self.counter = 0\n self.star_count_lst = [0, 0, 0, 0, 0, 0]\n\n def is_in_code(self, in_frshtt_code):\n if self.frshtt_code == in_frshtt_code:\n return True\n else:\n return False\n\n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n self.counter += 1\n\n def __str__(self):\n return_str = ''\n return_str += 'Code: ' + str(self.frshtt_code) + '\\n'\n return_str += 'Count: ' + str(self.counter) + '\\n'\n if self.star_count_lst[0] == 0:\n return_str += '0 Stars: 0.00%\\n'\n else:\n return_str += '0 Stars: ' + str(round(self.star_count_lst[0] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[1] == 0:\n return_str += '1 Stars: 0.00%\\n'\n else:\n return_str += '1 Stars: ' + str(round(self.star_count_lst[1] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[2] == 0:\n return_str += '2 Stars: 0.00%\\n'\n else:\n return_str += '2 Stars: ' + str(round(self.star_count_lst[2] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[3] == 0:\n return_str += '3 Stars: 0.00%\\n'\n else:\n return_str += '3 Stars: ' + str(round(self.star_count_lst[3] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[4] == 0:\n return_str += '4 Stars: 0.00%\\n'\n else:\n return_str += '4 Stars: ' + str(round(self.star_count_lst[4] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[5] == 0:\n return_str += '5 Stars: 0.00%\\n'\n else:\n return_str += '5 Stars: ' + str(round(self.star_count_lst[5] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n return return_str\n\n\nclass FRSHTTAnalysis:\n frshtt_holder_lst = list()\n\n def __init__(self):\n self.frshtt_holder_lst.append(FRSHTTHolder('000000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('100000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('001000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('011000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000100'))\n\n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Code:\\n'\n return_str += '-------------------------\\n'\n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + '\\n'\n return return_str\n", "step-3": "<mask token>\n\n\nclass TempAnalysis:\n <mask token>\n\n def __init__(self):\n temp_counter = 0\n while temp_counter < 110:\n self.temp_holder_lst.append(TempHolder(temp_counter, \n temp_counter + 10))\n temp_counter += 10\n\n def add_rating(self, rating, temp):\n for temp_holder in self.temp_holder_lst:\n if temp_holder.is_in_temp_range(temp):\n temp_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Temperature:\\n'\n return_str += '-------------------------\\n'\n for temp_holder in self.temp_holder_lst:\n return_str += str(temp_holder) + '\\n'\n return return_str\n\n\n<mask token>\n\n\nclass FRSHTTHolder:\n frshtt_code = ''\n star_count_lst = [0, 0, 0, 0, 0, 0]\n 
counter = 0\n\n def __init__(self, in_frshtt_code):\n self.frshtt_code = in_frshtt_code\n self.counter = 0\n self.star_count_lst = [0, 0, 0, 0, 0, 0]\n\n def is_in_code(self, in_frshtt_code):\n if self.frshtt_code == in_frshtt_code:\n return True\n else:\n return False\n\n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n self.counter += 1\n\n def __str__(self):\n return_str = ''\n return_str += 'Code: ' + str(self.frshtt_code) + '\\n'\n return_str += 'Count: ' + str(self.counter) + '\\n'\n if self.star_count_lst[0] == 0:\n return_str += '0 Stars: 0.00%\\n'\n else:\n return_str += '0 Stars: ' + str(round(self.star_count_lst[0] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[1] == 0:\n return_str += '1 Stars: 0.00%\\n'\n else:\n return_str += '1 Stars: ' + str(round(self.star_count_lst[1] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[2] == 0:\n return_str += '2 Stars: 0.00%\\n'\n else:\n return_str += '2 Stars: ' + str(round(self.star_count_lst[2] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[3] == 0:\n return_str += '3 Stars: 0.00%\\n'\n else:\n return_str += '3 Stars: ' + str(round(self.star_count_lst[3] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[4] == 0:\n return_str += '4 Stars: 0.00%\\n'\n else:\n return_str += '4 Stars: ' + str(round(self.star_count_lst[4] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[5] == 0:\n return_str += '5 Stars: 0.00%\\n'\n else:\n return_str += '5 Stars: ' + str(round(self.star_count_lst[5] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n return return_str\n\n\nclass FRSHTTAnalysis:\n frshtt_holder_lst = list()\n\n def __init__(self):\n self.frshtt_holder_lst.append(FRSHTTHolder('000000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('100000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('001000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('011000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000100'))\n\n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Code:\\n'\n return_str += '-------------------------\\n'\n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + '\\n'\n return return_str\n", "step-4": "<mask token>\n\n\nclass TempHolder:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, in_range_start, in_range_end):\n self.range_start = in_range_start\n self.range_end = in_range_end\n self.counter = 0\n self.star_count_lst = [0, 0, 0, 0, 0, 0]\n\n def is_in_temp_range(self, temp):\n if self.range_start <= temp and temp < self.range_end:\n return True\n else:\n return False\n <mask token>\n <mask token>\n\n\nclass TempAnalysis:\n temp_holder_lst = list()\n\n def __init__(self):\n temp_counter = 0\n while temp_counter < 110:\n self.temp_holder_lst.append(TempHolder(temp_counter, \n temp_counter + 10))\n temp_counter += 
10\n\n def add_rating(self, rating, temp):\n for temp_holder in self.temp_holder_lst:\n if temp_holder.is_in_temp_range(temp):\n temp_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Temperature:\\n'\n return_str += '-------------------------\\n'\n for temp_holder in self.temp_holder_lst:\n return_str += str(temp_holder) + '\\n'\n return return_str\n\n\n<mask token>\n\n\nclass FRSHTTHolder:\n frshtt_code = ''\n star_count_lst = [0, 0, 0, 0, 0, 0]\n counter = 0\n\n def __init__(self, in_frshtt_code):\n self.frshtt_code = in_frshtt_code\n self.counter = 0\n self.star_count_lst = [0, 0, 0, 0, 0, 0]\n\n def is_in_code(self, in_frshtt_code):\n if self.frshtt_code == in_frshtt_code:\n return True\n else:\n return False\n\n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n self.counter += 1\n\n def __str__(self):\n return_str = ''\n return_str += 'Code: ' + str(self.frshtt_code) + '\\n'\n return_str += 'Count: ' + str(self.counter) + '\\n'\n if self.star_count_lst[0] == 0:\n return_str += '0 Stars: 0.00%\\n'\n else:\n return_str += '0 Stars: ' + str(round(self.star_count_lst[0] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[1] == 0:\n return_str += '1 Stars: 0.00%\\n'\n else:\n return_str += '1 Stars: ' + str(round(self.star_count_lst[1] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[2] == 0:\n return_str += '2 Stars: 0.00%\\n'\n else:\n return_str += '2 Stars: ' + str(round(self.star_count_lst[2] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[3] == 0:\n return_str += '3 Stars: 0.00%\\n'\n else:\n return_str += '3 Stars: ' + str(round(self.star_count_lst[3] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[4] == 0:\n return_str += '4 Stars: 0.00%\\n'\n else:\n return_str += '4 Stars: ' + str(round(self.star_count_lst[4] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n if self.star_count_lst[5] == 0:\n return_str += '5 Stars: 0.00%\\n'\n else:\n return_str += '5 Stars: ' + str(round(self.star_count_lst[5] /\n (self.counter * 1.0), 4) * 100) + '%\\n'\n return return_str\n\n\nclass FRSHTTAnalysis:\n frshtt_holder_lst = list()\n\n def __init__(self):\n self.frshtt_holder_lst.append(FRSHTTHolder('000000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('010010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('100000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('001000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('011000'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000010'))\n self.frshtt_holder_lst.append(FRSHTTHolder('000100'))\n\n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n return False\n\n def __str__(self):\n return_str = 'Breakdown by Code:\\n'\n return_str += '-------------------------\\n'\n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + '\\n'\n return return_str\n", "step-5": "'''\nTemperature Container\n'''\nclass TempHolder:\n range_start = 0\n range_end = 0\n \n star_count_lst = [0,0,0,0,0,0]\n counter = 0\n \n def 
__init__(self, in_range_start, in_range_end):\n self.range_start = in_range_start\n self.range_end = in_range_end\n self.counter = 0\n self.star_count_lst = [0,0,0,0,0,0]\n \n def is_in_temp_range(self, temp):\n if self.range_start <= temp and temp < self.range_end:\n return True\n else:\n return False\n \n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n \n self.counter += 1\n \n def __str__(self):\n return_str = \"\"\n \n return_str += \"Temp: \" + str(self.range_start) + \"-\" + str(self.range_end) + \"\\n\"\n return_str += \"Count: \" + str(self.counter) + \"\\n\"\n\n if self.star_count_lst[0] == 0:\n return_str += \"0 Stars: 0.00%\\n\" \n else:\n return_str += \"0 Stars: \" + str(round((self.star_count_lst[0] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[1] == 0:\n return_str += \"1 Stars: 0.00%\\n\"\n else:\n return_str += \"1 Stars: \" + str(round((self.star_count_lst[1] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[2] == 0:\n return_str += \"2 Stars: 0.00%\\n\"\n else:\n return_str += \"2 Stars: \" + str(round((self.star_count_lst[2] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[3] == 0:\n return_str += \"3 Stars: 0.00%\\n\"\n else:\n return_str += \"3 Stars: \" + str(round((self.star_count_lst[3] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[4] == 0:\n return_str += \"4 Stars: 0.00%\\n\"\n else:\n return_str += \"4 Stars: \" + str(round((self.star_count_lst[4] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[5] == 0:\n return_str += \"5 Stars: 0.00%\\n\"\n else:\n return_str += \"5 Stars: \" + str(round((self.star_count_lst[5] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n return return_str\n \nclass TempAnalysis:\n temp_holder_lst = list()\n \n def __init__(self):\n temp_counter = 0\n \n while temp_counter < 110:\n self.temp_holder_lst.append(TempHolder(temp_counter, temp_counter + 10))\n temp_counter += 10\n \n def add_rating(self, rating, temp):\n for temp_holder in self.temp_holder_lst:\n if temp_holder.is_in_temp_range(temp):\n temp_holder.add_rating(rating)\n return True\n \n return False\n \n def __str__(self):\n return_str = \"Breakdown by Temperature:\\n\"\n return_str += \"-------------------------\\n\"\n \n for temp_holder in self.temp_holder_lst:\n return_str += str(temp_holder) + \"\\n\"\n \n return return_str\n \n \n'''\nTemperature Container\n'''\nclass FRSHTTHolder:\n frshtt_code = \"\"\n \n star_count_lst = [0,0,0,0,0,0]\n counter = 0\n \n def __init__(self, in_frshtt_code):\n self.frshtt_code = in_frshtt_code\n self.counter = 0\n self.star_count_lst = [0,0,0,0,0,0]\n \n def is_in_code(self, in_frshtt_code):\n if self.frshtt_code == in_frshtt_code:\n return True\n else:\n return False\n \n def add_rating(self, rating):\n if int(rating) == 0:\n self.star_count_lst[0] += 1\n if int(rating) == 1:\n self.star_count_lst[1] += 1\n if int(rating) == 2:\n self.star_count_lst[2] += 1\n if int(rating) == 3:\n self.star_count_lst[3] += 1\n if int(rating) == 4:\n self.star_count_lst[4] += 1\n if int(rating) == 5:\n self.star_count_lst[5] += 1\n \n self.counter += 1\n \n def __str__(self):\n return_str = \"\"\n \n return_str += \"Code: \" + 
str(self.frshtt_code) + \"\\n\"\n return_str += \"Count: \" + str(self.counter) + \"\\n\"\n\n if self.star_count_lst[0] == 0:\n return_str += \"0 Stars: 0.00%\\n\" \n else:\n return_str += \"0 Stars: \" + str(round((self.star_count_lst[0] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[1] == 0:\n return_str += \"1 Stars: 0.00%\\n\"\n else:\n return_str += \"1 Stars: \" + str(round((self.star_count_lst[1] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[2] == 0:\n return_str += \"2 Stars: 0.00%\\n\"\n else:\n return_str += \"2 Stars: \" + str(round((self.star_count_lst[2] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[3] == 0:\n return_str += \"3 Stars: 0.00%\\n\"\n else:\n return_str += \"3 Stars: \" + str(round((self.star_count_lst[3] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[4] == 0:\n return_str += \"4 Stars: 0.00%\\n\"\n else:\n return_str += \"4 Stars: \" + str(round((self.star_count_lst[4] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n if self.star_count_lst[5] == 0:\n return_str += \"5 Stars: 0.00%\\n\"\n else:\n return_str += \"5 Stars: \" + str(round((self.star_count_lst[5] / (self.counter * 1.0)), 4) * 100) + \"%\\n\"\n \n return return_str\n \nclass FRSHTTAnalysis:\n frshtt_holder_lst = list()\n \n def __init__(self):\n # no weather\n self.frshtt_holder_lst.append(FRSHTTHolder(\"000000\"))\n # rain\n self.frshtt_holder_lst.append(FRSHTTHolder(\"010000\"))\n # thunder strom\n self.frshtt_holder_lst.append(FRSHTTHolder(\"010010\"))\n # fog\n self.frshtt_holder_lst.append(FRSHTTHolder(\"100000\"))\n # snow\n self.frshtt_holder_lst.append(FRSHTTHolder(\"001000\"))\n # mixed (snow/rain)\n self.frshtt_holder_lst.append(FRSHTTHolder(\"011000\"))\n # dry thunder\n self.frshtt_holder_lst.append(FRSHTTHolder(\"000010\"))\n # hail\n self.frshtt_holder_lst.append(FRSHTTHolder(\"000100\"))\n \n def add_rating(self, rating, frshtt_code):\n for frshtt_holder in self.frshtt_holder_lst:\n if frshtt_holder.is_in_code(frshtt_code):\n frshtt_holder.add_rating(rating)\n return True\n \n return False\n \n def __str__(self):\n return_str = \"Breakdown by Code:\\n\"\n return_str += \"-------------------------\\n\"\n \n for frshtt_holder in self.frshtt_holder_lst:\n return_str += str(frshtt_holder) + \"\\n\"\n \n return return_str\n", "step-ids": [ 11, 13, 15, 19, 23 ] }
[ 11, 13, 15, 19, 23 ]
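A minimal usage sketch for the rating-by-weather classes in the record above; temperatures are assumed to be Fahrenheit given the 0-110 bucket range, and note that temp_holder_lst and frshtt_holder_lst are class-level lists, so constructing a second instance keeps appending holders to the same shared list:

temp_stats = TempAnalysis()
temp_stats.add_rating(5, 72)            # falls in the 70-80 bucket
temp_stats.add_rating(3, 72)
print(temp_stats)                       # per-bucket star percentages

frshtt_stats = FRSHTTAnalysis()
frshtt_stats.add_rating(4, '010000')    # FRSHTT code the record maps to rain
print(frshtt_stats)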
<|reserved_special_token_0|> class LicenseChecker(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class LicenseChecker(object): <|reserved_special_token_0|> <|reserved_special_token_0|> def __updateTimes(self, times): actual = self.__countTimes() ff = open('times.ehead', 'w') ff.write(str(actual - times)) ff.close() def isActive(self): try: site = urllib.urlopen(self.url) content = site.readlines() site.close() except IOError: if not self.__countTimes() == 0: self.__updateTimes(1) return {'active': True, 'msg': 'Ejecutando sin conexion.'} else: return {'active': False, 'msg': 'Ejecutado demasiadas veces sin conexion.'} if content[0].strip() == 'ACTIVE': self.__updateTimes(self.count_offline) return {'active': True, 'msg': 'Iniciando Sistema'} else: return {'active': False, 'msg': content[0].strip()} <|reserved_special_token_1|> <|reserved_special_token_0|> class LicenseChecker(object): def __init__(self): self.url = 'http://logon.guidoaccardo.com.ar/' self.count_offline = 15 def __countTimes(self): ff = open('times.ehead', 'r') bb = ff.read() ff.close() return int(bb) def __updateTimes(self, times): actual = self.__countTimes() ff = open('times.ehead', 'w') ff.write(str(actual - times)) ff.close() def isActive(self): try: site = urllib.urlopen(self.url) content = site.readlines() site.close() except IOError: if not self.__countTimes() == 0: self.__updateTimes(1) return {'active': True, 'msg': 'Ejecutando sin conexion.'} else: return {'active': False, 'msg': 'Ejecutado demasiadas veces sin conexion.'} if content[0].strip() == 'ACTIVE': self.__updateTimes(self.count_offline) return {'active': True, 'msg': 'Iniciando Sistema'} else: return {'active': False, 'msg': content[0].strip()} <|reserved_special_token_1|> import urllib class LicenseChecker(object): def __init__(self): self.url = 'http://logon.guidoaccardo.com.ar/' self.count_offline = 15 def __countTimes(self): ff = open('times.ehead', 'r') bb = ff.read() ff.close() return int(bb) def __updateTimes(self, times): actual = self.__countTimes() ff = open('times.ehead', 'w') ff.write(str(actual - times)) ff.close() def isActive(self): try: site = urllib.urlopen(self.url) content = site.readlines() site.close() except IOError: if not self.__countTimes() == 0: self.__updateTimes(1) return {'active': True, 'msg': 'Ejecutando sin conexion.'} else: return {'active': False, 'msg': 'Ejecutado demasiadas veces sin conexion.'} if content[0].strip() == 'ACTIVE': self.__updateTimes(self.count_offline) return {'active': True, 'msg': 'Iniciando Sistema'} else: return {'active': False, 'msg': content[0].strip()} <|reserved_special_token_1|> #!/usr/bin/env python import urllib class LicenseChecker( object ): def __init__( self ): self.url = 'http://logon.guidoaccardo.com.ar/' self.count_offline = 15 def __countTimes( self ): ff = open( 'times.ehead', 'r' ) bb = ff.read() ff.close() return int( bb ) def __updateTimes( self, times ): actual = self.__countTimes() ff = open( 'times.ehead', 'w' ) ff.write( str( actual-times ) ) ff.close() def isActive( self ): try: site = urllib.urlopen( self.url ) content = site.readlines() site.close() except IOError: if not self.__countTimes() == 0: self.__updateTimes( 1 ) return { 'active':True, 'msg':'Ejecutando sin conexion.' } else: return { 'active':False, 'msg':'Ejecutado demasiadas veces sin conexion.' 
} if content[0].strip() == 'ACTIVE': self.__updateTimes( self.count_offline ) return { 'active':True, 'msg':'Iniciando Sistema' } else: return { 'active':False, 'msg':content[0].strip() }
flexible
{ "blob_id": "c70aa1a373530ac73553753e62d3989f5bc79287", "index": 687, "step-1": "<mask token>\n\n\nclass LicenseChecker(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass LicenseChecker(object):\n <mask token>\n <mask token>\n\n def __updateTimes(self, times):\n actual = self.__countTimes()\n ff = open('times.ehead', 'w')\n ff.write(str(actual - times))\n ff.close()\n\n def isActive(self):\n try:\n site = urllib.urlopen(self.url)\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes(1)\n return {'active': True, 'msg': 'Ejecutando sin conexion.'}\n else:\n return {'active': False, 'msg':\n 'Ejecutado demasiadas veces sin conexion.'}\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes(self.count_offline)\n return {'active': True, 'msg': 'Iniciando Sistema'}\n else:\n return {'active': False, 'msg': content[0].strip()}\n", "step-3": "<mask token>\n\n\nclass LicenseChecker(object):\n\n def __init__(self):\n self.url = 'http://logon.guidoaccardo.com.ar/'\n self.count_offline = 15\n\n def __countTimes(self):\n ff = open('times.ehead', 'r')\n bb = ff.read()\n ff.close()\n return int(bb)\n\n def __updateTimes(self, times):\n actual = self.__countTimes()\n ff = open('times.ehead', 'w')\n ff.write(str(actual - times))\n ff.close()\n\n def isActive(self):\n try:\n site = urllib.urlopen(self.url)\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes(1)\n return {'active': True, 'msg': 'Ejecutando sin conexion.'}\n else:\n return {'active': False, 'msg':\n 'Ejecutado demasiadas veces sin conexion.'}\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes(self.count_offline)\n return {'active': True, 'msg': 'Iniciando Sistema'}\n else:\n return {'active': False, 'msg': content[0].strip()}\n", "step-4": "import urllib\n\n\nclass LicenseChecker(object):\n\n def __init__(self):\n self.url = 'http://logon.guidoaccardo.com.ar/'\n self.count_offline = 15\n\n def __countTimes(self):\n ff = open('times.ehead', 'r')\n bb = ff.read()\n ff.close()\n return int(bb)\n\n def __updateTimes(self, times):\n actual = self.__countTimes()\n ff = open('times.ehead', 'w')\n ff.write(str(actual - times))\n ff.close()\n\n def isActive(self):\n try:\n site = urllib.urlopen(self.url)\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes(1)\n return {'active': True, 'msg': 'Ejecutando sin conexion.'}\n else:\n return {'active': False, 'msg':\n 'Ejecutado demasiadas veces sin conexion.'}\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes(self.count_offline)\n return {'active': True, 'msg': 'Iniciando Sistema'}\n else:\n return {'active': False, 'msg': content[0].strip()}\n", "step-5": "#!/usr/bin/env python\n\nimport urllib\n\nclass LicenseChecker( object ):\n\n def __init__( self ):\n self.url = 'http://logon.guidoaccardo.com.ar/'\n self.count_offline = 15\n\n def __countTimes( self ):\n ff = open( 'times.ehead', 'r' )\n bb = ff.read()\n ff.close()\n\n return int( bb )\n\n def __updateTimes( self, times ):\n actual = self.__countTimes()\n ff = open( 'times.ehead', 'w' )\n ff.write( str( actual-times ) )\n ff.close()\n\n def isActive( self ):\n try:\n site = urllib.urlopen( self.url )\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes( 1 )\n return { 'active':True, 'msg':'Ejecutando sin conexion.' 
}\n else:\n return { 'active':False, 'msg':'Ejecutado demasiadas veces sin conexion.' }\n\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes( self.count_offline )\n return { 'active':True, 'msg':'Iniciando Sistema' }\n else:\n return { 'active':False, 'msg':content[0].strip() }\n", "step-ids": [ 1, 3, 5, 6, 7 ] }
[ 1, 3, 5, 6, 7 ]
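A minimal usage sketch for the LicenseChecker in the record above; it assumes a times.ehead counter file already exists in the working directory, and the urllib.urlopen call means the class as written targets Python 2:

checker = LicenseChecker()
status = checker.isActive()
if status['active']:
    print(status['msg'])            # e.g. 'Iniciando Sistema'
else:
    raise SystemExit(status['msg'])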
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack.package import * class RDt(RPackage): """A Wrapper of the JavaScript Library 'DataTables'. Data objects in R can be rendered as HTML tables using the JavaScript library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables' library has been included in this R package. The package name 'DT' is an abbreviation of 'DataTables'.""" cran = "DT" version("0.23", sha256="360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70") version("0.20", sha256="c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f") version("0.17", sha256="e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56") version("0.13", sha256="79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5") version("0.8", sha256="90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21") version("0.7", sha256="1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c") version("0.6", sha256="2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916") version("0.4", sha256="3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19") version("0.3", sha256="ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb") version("0.2", sha256="a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd") version("0.1", sha256="129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756") depends_on("[email protected]:", type=("build", "run")) depends_on("[email protected]:", type=("build", "run")) depends_on("[email protected]:", type=("build", "run"), when="@0.8:") depends_on("r-magrittr", type=("build", "run")) depends_on("r-crosstalk", type=("build", "run")) depends_on("r-jquerylib", type=("build", "run"), when="@0.19:") depends_on("r-promises", type=("build", "run"), when="@0.5:")
normal
{ "blob_id": "c88e2336432f93d95b4e2285aa532b673a4a410b", "index": 1095, "step-1": "<mask token>\n\n\nclass RDt(RPackage):\n <mask token>\n <mask token>\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n", "step-2": "<mask token>\n\n\nclass RDt(RPackage):\n <mask token>\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n", "step-3": "<mask token>\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. 
The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n", "step-4": "from spack.package import *\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n", "step-5": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n\n cran = \"DT\"\n\n version(\"0.23\", sha256=\"360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70\")\n version(\"0.20\", sha256=\"c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f\")\n version(\"0.17\", sha256=\"e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56\")\n version(\"0.13\", sha256=\"79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5\")\n version(\"0.8\", sha256=\"90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21\")\n version(\"0.7\", sha256=\"1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c\")\n version(\"0.6\", sha256=\"2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916\")\n version(\"0.4\", sha256=\"3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19\")\n version(\"0.3\", sha256=\"ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb\")\n version(\"0.2\", sha256=\"a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd\")\n version(\"0.1\", sha256=\"129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756\")\n\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"))\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"))\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"), when=\"@0.8:\")\n depends_on(\"r-magrittr\", type=(\"build\", \"run\"))\n depends_on(\"r-crosstalk\", type=(\"build\", \"run\"))\n depends_on(\"r-jquerylib\", type=(\"build\", \"run\"), when=\"@0.19:\")\n depends_on(\"r-promises\", type=(\"build\", \"run\"), when=\"@0.5:\")\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def collect_env(): """Collect the information of the running environments. Returns: dict: The environment information. The following fields are contained. - sys.platform: The variable of ``sys.platform``. - Python: Python version. - CUDA available: Bool, indicating if CUDA is available. - GPU devices: Device type of each GPU. - CUDA_HOME (optional): The env var ``CUDA_HOME``. - NVCC (optional): NVCC version. - GCC: GCC version, "n/a" if GCC is not installed. - MSVC: Microsoft Virtual C++ Compiler version, Windows only. - PyTorch: PyTorch version. - PyTorch compiling details: The output of ``torch.__config__.show()``. - TorchVision (optional): TorchVision version. - OpenCV: OpenCV version. - MMCV: MMCV version. - MMCV Compiler: The GCC version for compiling MMCV ops. - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops. """ env_info = {} env_info['sys.platform'] = sys.platform env_info['Python'] = sys.version.replace('\n', '') cuda_available = torch.cuda.is_available() env_info['CUDA available'] = cuda_available if cuda_available: devices = defaultdict(list) for k in range(torch.cuda.device_count()): devices[torch.cuda.get_device_name(k)].append(str(k)) for name, device_ids in devices.items(): env_info['GPU ' + ','.join(device_ids)] = name from mmcv.utils.parrots_wrapper import _get_cuda_home CUDA_HOME = _get_cuda_home() env_info['CUDA_HOME'] = CUDA_HOME if CUDA_HOME is not None and osp.isdir(CUDA_HOME): try: nvcc = osp.join(CUDA_HOME, 'bin/nvcc') nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True) nvcc = nvcc.decode('utf-8').strip() release = nvcc.rfind('Cuda compilation tools') build = nvcc.rfind('Build ') nvcc = nvcc[release:build].strip() except subprocess.SubprocessError: nvcc = 'Not Available' env_info['NVCC'] = nvcc try: import sysconfig cc = sysconfig.get_config_var('CC') if cc: cc = osp.basename(cc.split()[0]) cc_info = subprocess.check_output(f'{cc} --version', shell=True) env_info['GCC'] = cc_info.decode('utf-8').partition('\n')[0].strip( ) else: import locale import os from distutils.ccompiler import new_compiler ccompiler = new_compiler() ccompiler.initialize() cc = subprocess.check_output(f'{ccompiler.cc}', stderr= subprocess.STDOUT, shell=True) encoding = os.device_encoding(sys.stdout.fileno() ) or locale.getpreferredencoding() env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip() env_info['GCC'] = 'n/a' except subprocess.CalledProcessError: env_info['GCC'] = 'n/a' env_info['PyTorch'] = torch.__version__ env_info['PyTorch compiling details'] = get_build_config() try: import torchvision env_info['TorchVision'] = torchvision.__version__ except ModuleNotFoundError: pass env_info['OpenCV'] = cv2.__version__ env_info['MMCV'] = mmcv.__version__ try: from mmcv.ops import get_compiler_version, get_compiling_cuda_version except ModuleNotFoundError: env_info['MMCV Compiler'] = 'n/a' env_info['MMCV CUDA Compiler'] = 'n/a' else: env_info['MMCV Compiler'] = get_compiler_version() env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() return env_info <|reserved_special_token_1|> <|reserved_special_token_0|> import os.path as osp import subprocess import sys from collections import defaultdict import cv2 import torch import mmcv from .parrots_wrapper import get_build_config def collect_env(): """Collect the information of the running environments. Returns: dict: The environment information. The following fields are contained. 
- sys.platform: The variable of ``sys.platform``. - Python: Python version. - CUDA available: Bool, indicating if CUDA is available. - GPU devices: Device type of each GPU. - CUDA_HOME (optional): The env var ``CUDA_HOME``. - NVCC (optional): NVCC version. - GCC: GCC version, "n/a" if GCC is not installed. - MSVC: Microsoft Virtual C++ Compiler version, Windows only. - PyTorch: PyTorch version. - PyTorch compiling details: The output of ``torch.__config__.show()``. - TorchVision (optional): TorchVision version. - OpenCV: OpenCV version. - MMCV: MMCV version. - MMCV Compiler: The GCC version for compiling MMCV ops. - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops. """ env_info = {} env_info['sys.platform'] = sys.platform env_info['Python'] = sys.version.replace('\n', '') cuda_available = torch.cuda.is_available() env_info['CUDA available'] = cuda_available if cuda_available: devices = defaultdict(list) for k in range(torch.cuda.device_count()): devices[torch.cuda.get_device_name(k)].append(str(k)) for name, device_ids in devices.items(): env_info['GPU ' + ','.join(device_ids)] = name from mmcv.utils.parrots_wrapper import _get_cuda_home CUDA_HOME = _get_cuda_home() env_info['CUDA_HOME'] = CUDA_HOME if CUDA_HOME is not None and osp.isdir(CUDA_HOME): try: nvcc = osp.join(CUDA_HOME, 'bin/nvcc') nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True) nvcc = nvcc.decode('utf-8').strip() release = nvcc.rfind('Cuda compilation tools') build = nvcc.rfind('Build ') nvcc = nvcc[release:build].strip() except subprocess.SubprocessError: nvcc = 'Not Available' env_info['NVCC'] = nvcc try: import sysconfig cc = sysconfig.get_config_var('CC') if cc: cc = osp.basename(cc.split()[0]) cc_info = subprocess.check_output(f'{cc} --version', shell=True) env_info['GCC'] = cc_info.decode('utf-8').partition('\n')[0].strip( ) else: import locale import os from distutils.ccompiler import new_compiler ccompiler = new_compiler() ccompiler.initialize() cc = subprocess.check_output(f'{ccompiler.cc}', stderr= subprocess.STDOUT, shell=True) encoding = os.device_encoding(sys.stdout.fileno() ) or locale.getpreferredencoding() env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip() env_info['GCC'] = 'n/a' except subprocess.CalledProcessError: env_info['GCC'] = 'n/a' env_info['PyTorch'] = torch.__version__ env_info['PyTorch compiling details'] = get_build_config() try: import torchvision env_info['TorchVision'] = torchvision.__version__ except ModuleNotFoundError: pass env_info['OpenCV'] = cv2.__version__ env_info['MMCV'] = mmcv.__version__ try: from mmcv.ops import get_compiler_version, get_compiling_cuda_version except ModuleNotFoundError: env_info['MMCV Compiler'] = 'n/a' env_info['MMCV CUDA Compiler'] = 'n/a' else: env_info['MMCV Compiler'] = get_compiler_version() env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() return env_info <|reserved_special_token_1|> # -*- coding: utf-8 -*- # BSD 3-Clause License # # Copyright (c) 2017 # All rights reserved. # Copyright 2022 Huawei Technologies Co., Ltd # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. 
# # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ========================================================================== # -*- coding: utf-8 -*- # BSD 3-Clause License # # Copyright (c) 2017 # All rights reserved. # Copyright 2022 Huawei Technologies Co., Ltd # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ========================================================================== # Copyright (c) OpenMMLab. All rights reserved. """This file holding some environment constant for sharing by other files.""" import os.path as osp import subprocess import sys from collections import defaultdict import cv2 import torch import mmcv from .parrots_wrapper import get_build_config def collect_env(): """Collect the information of the running environments. Returns: dict: The environment information. The following fields are contained. - sys.platform: The variable of ``sys.platform``. - Python: Python version. - CUDA available: Bool, indicating if CUDA is available. - GPU devices: Device type of each GPU. - CUDA_HOME (optional): The env var ``CUDA_HOME``. - NVCC (optional): NVCC version. - GCC: GCC version, "n/a" if GCC is not installed. 
- MSVC: Microsoft Virtual C++ Compiler version, Windows only. - PyTorch: PyTorch version. - PyTorch compiling details: The output of \ ``torch.__config__.show()``. - TorchVision (optional): TorchVision version. - OpenCV: OpenCV version. - MMCV: MMCV version. - MMCV Compiler: The GCC version for compiling MMCV ops. - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops. """ env_info = {} env_info['sys.platform'] = sys.platform env_info['Python'] = sys.version.replace('\n', '') cuda_available = torch.cuda.is_available() env_info['CUDA available'] = cuda_available if cuda_available: devices = defaultdict(list) for k in range(torch.cuda.device_count()): devices[torch.cuda.get_device_name(k)].append(str(k)) for name, device_ids in devices.items(): env_info['GPU ' + ','.join(device_ids)] = name from mmcv.utils.parrots_wrapper import _get_cuda_home CUDA_HOME = _get_cuda_home() env_info['CUDA_HOME'] = CUDA_HOME if CUDA_HOME is not None and osp.isdir(CUDA_HOME): try: nvcc = osp.join(CUDA_HOME, 'bin/nvcc') nvcc = subprocess.check_output(f'"{nvcc}" -V', shell=True) nvcc = nvcc.decode('utf-8').strip() release = nvcc.rfind('Cuda compilation tools') build = nvcc.rfind('Build ') nvcc = nvcc[release:build].strip() except subprocess.SubprocessError: nvcc = 'Not Available' env_info['NVCC'] = nvcc try: # Check C++ Compiler. # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...', # indicating the compiler used, we use this to get the compiler name import sysconfig cc = sysconfig.get_config_var('CC') if cc: cc = osp.basename(cc.split()[0]) cc_info = subprocess.check_output(f'{cc} --version', shell=True) env_info['GCC'] = cc_info.decode('utf-8').partition( '\n')[0].strip() else: # on Windows, cl.exe is not in PATH. We need to find the path. # distutils.ccompiler.new_compiler() returns a msvccompiler # object and after initialization, path to cl.exe is found. import locale import os from distutils.ccompiler import new_compiler ccompiler = new_compiler() ccompiler.initialize() cc = subprocess.check_output( f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True) encoding = os.device_encoding( sys.stdout.fileno()) or locale.getpreferredencoding() env_info['MSVC'] = cc.decode(encoding).partition('\n')[0].strip() env_info['GCC'] = 'n/a' except subprocess.CalledProcessError: env_info['GCC'] = 'n/a' env_info['PyTorch'] = torch.__version__ env_info['PyTorch compiling details'] = get_build_config() try: import torchvision env_info['TorchVision'] = torchvision.__version__ except ModuleNotFoundError: pass env_info['OpenCV'] = cv2.__version__ env_info['MMCV'] = mmcv.__version__ try: from mmcv.ops import get_compiler_version, get_compiling_cuda_version except ModuleNotFoundError: env_info['MMCV Compiler'] = 'n/a' env_info['MMCV CUDA Compiler'] = 'n/a' else: env_info['MMCV Compiler'] = get_compiler_version() env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version() return env_info
flexible
{ "blob_id": "ee489c2e313a96671db79398218f8604f7ae1bf3", "index": 3569, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef collect_env():\n \"\"\"Collect the information of the running environments.\n\n Returns:\n dict: The environment information. The following fields are contained.\n\n - sys.platform: The variable of ``sys.platform``.\n - Python: Python version.\n - CUDA available: Bool, indicating if CUDA is available.\n - GPU devices: Device type of each GPU.\n - CUDA_HOME (optional): The env var ``CUDA_HOME``.\n - NVCC (optional): NVCC version.\n - GCC: GCC version, \"n/a\" if GCC is not installed.\n - MSVC: Microsoft Virtual C++ Compiler version, Windows only.\n - PyTorch: PyTorch version.\n - PyTorch compiling details: The output of ``torch.__config__.show()``.\n - TorchVision (optional): TorchVision version.\n - OpenCV: OpenCV version.\n - MMCV: MMCV version.\n - MMCV Compiler: The GCC version for compiling MMCV ops.\n - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.\n \"\"\"\n env_info = {}\n env_info['sys.platform'] = sys.platform\n env_info['Python'] = sys.version.replace('\\n', '')\n cuda_available = torch.cuda.is_available()\n env_info['CUDA available'] = cuda_available\n if cuda_available:\n devices = defaultdict(list)\n for k in range(torch.cuda.device_count()):\n devices[torch.cuda.get_device_name(k)].append(str(k))\n for name, device_ids in devices.items():\n env_info['GPU ' + ','.join(device_ids)] = name\n from mmcv.utils.parrots_wrapper import _get_cuda_home\n CUDA_HOME = _get_cuda_home()\n env_info['CUDA_HOME'] = CUDA_HOME\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n try:\n import sysconfig\n cc = sysconfig.get_config_var('CC')\n if cc:\n cc = osp.basename(cc.split()[0])\n cc_info = subprocess.check_output(f'{cc} --version', shell=True)\n env_info['GCC'] = cc_info.decode('utf-8').partition('\\n')[0].strip(\n )\n else:\n import locale\n import os\n from distutils.ccompiler import new_compiler\n ccompiler = new_compiler()\n ccompiler.initialize()\n cc = subprocess.check_output(f'{ccompiler.cc}', stderr=\n subprocess.STDOUT, shell=True)\n encoding = os.device_encoding(sys.stdout.fileno()\n ) or locale.getpreferredencoding()\n env_info['MSVC'] = cc.decode(encoding).partition('\\n')[0].strip()\n env_info['GCC'] = 'n/a'\n except subprocess.CalledProcessError:\n env_info['GCC'] = 'n/a'\n env_info['PyTorch'] = torch.__version__\n env_info['PyTorch compiling details'] = get_build_config()\n try:\n import torchvision\n env_info['TorchVision'] = torchvision.__version__\n except ModuleNotFoundError:\n pass\n env_info['OpenCV'] = cv2.__version__\n env_info['MMCV'] = mmcv.__version__\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n return env_info\n", "step-3": "<mask token>\nimport os.path as osp\nimport subprocess\nimport sys\nfrom collections import defaultdict\nimport cv2\nimport torch\nimport mmcv\nfrom .parrots_wrapper import 
get_build_config\n\n\ndef collect_env():\n \"\"\"Collect the information of the running environments.\n\n Returns:\n dict: The environment information. The following fields are contained.\n\n - sys.platform: The variable of ``sys.platform``.\n - Python: Python version.\n - CUDA available: Bool, indicating if CUDA is available.\n - GPU devices: Device type of each GPU.\n - CUDA_HOME (optional): The env var ``CUDA_HOME``.\n - NVCC (optional): NVCC version.\n - GCC: GCC version, \"n/a\" if GCC is not installed.\n - MSVC: Microsoft Virtual C++ Compiler version, Windows only.\n - PyTorch: PyTorch version.\n - PyTorch compiling details: The output of ``torch.__config__.show()``.\n - TorchVision (optional): TorchVision version.\n - OpenCV: OpenCV version.\n - MMCV: MMCV version.\n - MMCV Compiler: The GCC version for compiling MMCV ops.\n - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.\n \"\"\"\n env_info = {}\n env_info['sys.platform'] = sys.platform\n env_info['Python'] = sys.version.replace('\\n', '')\n cuda_available = torch.cuda.is_available()\n env_info['CUDA available'] = cuda_available\n if cuda_available:\n devices = defaultdict(list)\n for k in range(torch.cuda.device_count()):\n devices[torch.cuda.get_device_name(k)].append(str(k))\n for name, device_ids in devices.items():\n env_info['GPU ' + ','.join(device_ids)] = name\n from mmcv.utils.parrots_wrapper import _get_cuda_home\n CUDA_HOME = _get_cuda_home()\n env_info['CUDA_HOME'] = CUDA_HOME\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n try:\n import sysconfig\n cc = sysconfig.get_config_var('CC')\n if cc:\n cc = osp.basename(cc.split()[0])\n cc_info = subprocess.check_output(f'{cc} --version', shell=True)\n env_info['GCC'] = cc_info.decode('utf-8').partition('\\n')[0].strip(\n )\n else:\n import locale\n import os\n from distutils.ccompiler import new_compiler\n ccompiler = new_compiler()\n ccompiler.initialize()\n cc = subprocess.check_output(f'{ccompiler.cc}', stderr=\n subprocess.STDOUT, shell=True)\n encoding = os.device_encoding(sys.stdout.fileno()\n ) or locale.getpreferredencoding()\n env_info['MSVC'] = cc.decode(encoding).partition('\\n')[0].strip()\n env_info['GCC'] = 'n/a'\n except subprocess.CalledProcessError:\n env_info['GCC'] = 'n/a'\n env_info['PyTorch'] = torch.__version__\n env_info['PyTorch compiling details'] = get_build_config()\n try:\n import torchvision\n env_info['TorchVision'] = torchvision.__version__\n except ModuleNotFoundError:\n pass\n env_info['OpenCV'] = cv2.__version__\n env_info['MMCV'] = mmcv.__version__\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n return env_info\n", "step-4": "# -*- coding: utf-8 -*-\n# BSD 3-Clause License\n#\n# Copyright (c) 2017\n# All rights reserved.\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are 
met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ==========================================================================\n\n# -*- coding: utf-8 -*-\n# BSD 3-Clause License\n#\n# Copyright (c) 2017\n# All rights reserved.\n# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ==========================================================================\n\n# Copyright (c) OpenMMLab. 
All rights reserved.\n\"\"\"This file holding some environment constant for sharing by other files.\"\"\"\n\nimport os.path as osp\nimport subprocess\nimport sys\nfrom collections import defaultdict\n\nimport cv2\nimport torch\n\nimport mmcv\nfrom .parrots_wrapper import get_build_config\n\n\ndef collect_env():\n \"\"\"Collect the information of the running environments.\n\n Returns:\n dict: The environment information. The following fields are contained.\n\n - sys.platform: The variable of ``sys.platform``.\n - Python: Python version.\n - CUDA available: Bool, indicating if CUDA is available.\n - GPU devices: Device type of each GPU.\n - CUDA_HOME (optional): The env var ``CUDA_HOME``.\n - NVCC (optional): NVCC version.\n - GCC: GCC version, \"n/a\" if GCC is not installed.\n - MSVC: Microsoft Virtual C++ Compiler version, Windows only.\n - PyTorch: PyTorch version.\n - PyTorch compiling details: The output of \\\n ``torch.__config__.show()``.\n - TorchVision (optional): TorchVision version.\n - OpenCV: OpenCV version.\n - MMCV: MMCV version.\n - MMCV Compiler: The GCC version for compiling MMCV ops.\n - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.\n \"\"\"\n env_info = {}\n env_info['sys.platform'] = sys.platform\n env_info['Python'] = sys.version.replace('\\n', '')\n\n cuda_available = torch.cuda.is_available()\n env_info['CUDA available'] = cuda_available\n\n if cuda_available:\n devices = defaultdict(list)\n for k in range(torch.cuda.device_count()):\n devices[torch.cuda.get_device_name(k)].append(str(k))\n for name, device_ids in devices.items():\n env_info['GPU ' + ','.join(device_ids)] = name\n\n from mmcv.utils.parrots_wrapper import _get_cuda_home\n CUDA_HOME = _get_cuda_home()\n env_info['CUDA_HOME'] = CUDA_HOME\n\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n\n try:\n # Check C++ Compiler.\n # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',\n # indicating the compiler used, we use this to get the compiler name\n import sysconfig\n cc = sysconfig.get_config_var('CC')\n if cc:\n cc = osp.basename(cc.split()[0])\n cc_info = subprocess.check_output(f'{cc} --version', shell=True)\n env_info['GCC'] = cc_info.decode('utf-8').partition(\n '\\n')[0].strip()\n else:\n # on Windows, cl.exe is not in PATH. 
We need to find the path.\n # distutils.ccompiler.new_compiler() returns a msvccompiler\n # object and after initialization, path to cl.exe is found.\n import locale\n import os\n from distutils.ccompiler import new_compiler\n ccompiler = new_compiler()\n ccompiler.initialize()\n cc = subprocess.check_output(\n f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)\n encoding = os.device_encoding(\n sys.stdout.fileno()) or locale.getpreferredencoding()\n env_info['MSVC'] = cc.decode(encoding).partition('\\n')[0].strip()\n env_info['GCC'] = 'n/a'\n except subprocess.CalledProcessError:\n env_info['GCC'] = 'n/a'\n\n env_info['PyTorch'] = torch.__version__\n env_info['PyTorch compiling details'] = get_build_config()\n\n try:\n import torchvision\n env_info['TorchVision'] = torchvision.__version__\n except ModuleNotFoundError:\n pass\n\n env_info['OpenCV'] = cv2.__version__\n\n env_info['MMCV'] = mmcv.__version__\n\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n\n return env_info\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
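# Hedged usage sketch for the environment collector above. It assumes the module
# is installed as part of mmcv and exposed as mmcv.utils.collect_env; the function
# returns a plain dict, so the report can simply be printed field by field.
from mmcv.utils import collect_env

if __name__ == "__main__":
    for name, value in collect_env().items():
        print(f"{name}: {value}")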
""" CONVERT HOURS INTO SECONDS Write a function that converts hours into seconds. Examples: - how_many_seconds(2) -> 7200 - how_many_seconds(10) -> 36000 - how_many_seconds(24) -> 86400 Notes: - 60 seconds in a minute; 60 minutes in a hour. - Don't forget to return your answer. """ """ U.P.E.R. (A) UNDERSTAND: - Objective: - Write an algorithm that takes in a single input integer (representing a given number of hours) and returns a single output (representing the equivalent number of seconds). - Expected Inputs: - Number: 1 - Data Type: integer - Variable Name: 'hrs_int' - Expected Outputs: - Number: 1 - Data Type: integer - Variable Name: 'secs_int' - My Examples: - how_many_seconds(1) -> 3600 - 1 hr * (60 min/1 hr) * (60 sec/1 min) = 3600 secs - how_many_seconds(5) -> 18000 - 5 hr * (60 min/1 hr) * (60 sec/1 min) = 18000 secs - how_many_seconds(12) -> 43200 - 12 hr * (60 min/1 hr) * (60 sec/1 min) = 43200 secs - Edge Cases & Constraints to Consider: - Can the input be negative? - No, because time is measured in positive units. The input must be greater than 0. - Can the input be a floating point number? - Yes, because the number of hours doesn't need to be whole in order to find an equivalent number of seconds. - Can the input be None? - No, because you cannot convert 'None' number of hours. (B) PLAN: (1) Create a function that takes in a single given input, 'hrs_int', and returns a single output, 'secs_int'. (2) Assign the value of 'None' to two new variables, 'mins_int' and 'secs_int'. (3) Make sure that a conversion of hours to seconds will NOT occur unless the given input, 'hrs_int', is in fact of either "integer" or "float" data type. (a) If the given input, 'hrs_int', is a valid argument, proceed with converting the given number of hours into an equivalent number of seconds. i. Convert the number of hours in 'hrs_int' into an equivalent number of minutes and store that value in the previously declared 'mins_int' variable. ii. Convert the number of minutes in 'mins_int' into an equivalent number of seconds and store that value in the previously declared 'secs_int' variable. (b) If the given input, 'hrs_int', is an INVALID argument (i.e. - negative value, not of 'integer' or 'float' data types, null), handle the error with a 'TypeError' exception. (4) Return the value of 'secs_int'. """ # (C) EXECUTE: # def how_many_seconds(hrs_int): # mins_int = None # secs_int = None # if hrs_int > 0 and hrs_int is not None: # mins_int = hrs_int * 60 # converts given hours into minutes # secs_int = mins_int * 60 # converts given minutes into seconds # else: # raise TypeError("Invalid input type") # return secs_int # (D) REFLECT/REFACTOR: # Asymptotic Analysis: # - Time Complexity = O(1) # - Space Complexity = O(1) # Can the brute force solution be optimized further? # - Yes, but only by reducing the total number of lines of code and NOT by # improving time/space complexity of the solution. def how_many_seconds(hrs_int): secs_int = None if hrs_int > 0 and hrs_int is not None: secs_int = hrs_int * 60 * 60 # converts given hours into seconds return secs_int else: raise TypeError("Invalid input type")
normal
{ "blob_id": "34c7e6b6bc687bc641b7e3b9c70fd0844af8e340", "index": 8969, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef how_many_seconds(hrs_int):\n secs_int = None\n if hrs_int > 0 and hrs_int is not None:\n secs_int = hrs_int * 60 * 60\n return secs_int\n else:\n raise TypeError('Invalid input type')\n", "step-3": "\"\"\"\nCONVERT HOURS INTO SECONDS\n\nWrite a function that converts hours into seconds.\n\nExamples:\n - how_many_seconds(2) -> 7200\n - how_many_seconds(10) -> 36000\n - how_many_seconds(24) -> 86400\n \nNotes:\n - 60 seconds in a minute; 60 minutes in a hour.\n - Don't forget to return your answer.\n\"\"\"\n\n\"\"\"\nU.P.E.R.\n\n(A) UNDERSTAND:\n - Objective:\n - Write an algorithm that takes in a single input integer (representing a\n given number of hours) and returns a single output (representing the \n equivalent number of seconds).\n \n - Expected Inputs:\n - Number: 1\n - Data Type: integer\n - Variable Name: 'hrs_int'\n \n - Expected Outputs:\n - Number: 1\n - Data Type: integer\n - Variable Name: 'secs_int'\n \n - My Examples:\n - how_many_seconds(1) -> 3600\n - 1 hr * (60 min/1 hr) * (60 sec/1 min) = 3600 secs\n - how_many_seconds(5) -> 18000\n - 5 hr * (60 min/1 hr) * (60 sec/1 min) = 18000 secs\n - how_many_seconds(12) -> 43200\n - 12 hr * (60 min/1 hr) * (60 sec/1 min) = 43200 secs\n\n - Edge Cases & Constraints to Consider:\n - Can the input be negative?\n - No, because time is measured in positive units. The input must be greater than 0.\n - Can the input be a floating point number?\n - Yes, because the number of hours doesn't need to be whole in order\n to find an equivalent number of seconds.\n - Can the input be None?\n - No, because you cannot convert 'None' number of hours.\n \n(B) PLAN:\n\n (1) Create a function that takes in a single given input, 'hrs_int', and returns a single output, 'secs_int'.\n \n (2) Assign the value of 'None' to two new variables, 'mins_int' and 'secs_int'.\n \n (3) Make sure that a conversion of hours to seconds will NOT occur unless the given input, 'hrs_int', is in fact of either \"integer\" or \"float\" data type.\n\n (a) If the given input, 'hrs_int', is a valid argument, proceed with converting the given number of hours into an equivalent number of seconds.\n \n i. Convert the number of hours in 'hrs_int' into an equivalent number of minutes and store that value in the previously declared 'mins_int' variable.\n \n ii. Convert the number of minutes in 'mins_int' into an equivalent number of seconds and store that value in the previously declared 'secs_int' variable.\n \n (b) If the given input, 'hrs_int', is an INVALID argument (i.e. 
- negative value, not of 'integer' or 'float' data types, null), handle the error with a 'TypeError' exception.\n \n (4) Return the value of 'secs_int'.\n\n\"\"\"\n\n# (C) EXECUTE:\n\n# def how_many_seconds(hrs_int):\n# mins_int = None\n# secs_int = None\n \n# if hrs_int > 0 and hrs_int is not None:\n# mins_int = hrs_int * 60 # converts given hours into minutes\n# secs_int = mins_int * 60 # converts given minutes into seconds\n# else: \n# raise TypeError(\"Invalid input type\")\n\n# return secs_int\n\n# (D) REFLECT/REFACTOR:\n\n# Asymptotic Analysis:\n# - Time Complexity = O(1)\n# - Space Complexity = O(1)\n\n# Can the brute force solution be optimized further?\n# - Yes, but only by reducing the total number of lines of code and NOT by\n# improving time/space complexity of the solution.\n\ndef how_many_seconds(hrs_int):\n secs_int = None\n \n if hrs_int > 0 and hrs_int is not None:\n secs_int = hrs_int * 60 * 60 # converts given hours into seconds\n return secs_int\n else: \n raise TypeError(\"Invalid input type\")", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
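# A sketch of the same hours-to-seconds conversion with the None check moved
# ahead of the comparison (in Python 3, `None > 0` itself raises TypeError, so
# the original's `is not None` test never gets a chance to run). This is an
# illustrative variant, not the original solution; the asserts reuse the
# examples from the problem statement above.
def hours_to_seconds(hrs):
    if hrs is None or hrs <= 0:
        raise TypeError("Invalid input type")
    return hrs * 60 * 60  # hours -> minutes -> seconds

assert hours_to_seconds(2) == 7200
assert hours_to_seconds(10) == 36000
assert hours_to_seconds(24) == 86400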
# Generated by Django 2.2.10 on 2020-03-13 14:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('system', '0005_location'), ] operations = [ migrations.AddField( model_name='setting', name='runned_locations_initial_data', field=models.BooleanField(blank=True, default=False), ), migrations.AlterField( model_name='location', name='name', field=models.CharField(max_length=128, unique=True), ), ]
normal
{ "blob_id": "211ef4c64e42c54423ac8dab2128952874a2cf5a", "index": 7694, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('system', '0005_location')]\n operations = [migrations.AddField(model_name='setting', name=\n 'runned_locations_initial_data', field=models.BooleanField(blank=\n True, default=False)), migrations.AlterField(model_name='location',\n name='name', field=models.CharField(max_length=128, unique=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('system', '0005_location')]\n operations = [migrations.AddField(model_name='setting', name=\n 'runned_locations_initial_data', field=models.BooleanField(blank=\n True, default=False)), migrations.AlterField(model_name='location',\n name='name', field=models.CharField(max_length=128, unique=True))]\n", "step-5": "# Generated by Django 2.2.10 on 2020-03-13 14:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('system', '0005_location'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='setting',\n name='runned_locations_initial_data',\n field=models.BooleanField(blank=True, default=False),\n ),\n migrations.AlterField(\n model_name='location',\n name='name',\n field=models.CharField(max_length=128, unique=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Desc(Descriptive): name = 'desc' attrs = () class Metadata(Descriptive): name = 'metadata' attrs = () class Title(Descriptive): name = 'title' attrs = () <|reserved_special_token_1|> <|reserved_special_token_0|> class Descriptive(Element): <|reserved_special_token_0|> class Desc(Descriptive): name = 'desc' attrs = () class Metadata(Descriptive): name = 'metadata' attrs = () class Title(Descriptive): name = 'title' attrs = () <|reserved_special_token_1|> <|reserved_special_token_0|> class Descriptive(Element): def __init__(self): self.allowedChildren = () class Desc(Descriptive): name = 'desc' attrs = () class Metadata(Descriptive): name = 'metadata' attrs = () class Title(Descriptive): name = 'title' attrs = () <|reserved_special_token_1|> from svjesus.ffz import genContent from svjesus.elements.Base import Element class Descriptive(Element): def __init__(self): self.allowedChildren = () class Desc(Descriptive): name = 'desc' attrs = () class Metadata(Descriptive): name = 'metadata' attrs = () class Title(Descriptive): name = 'title' attrs = () <|reserved_special_token_1|> from svjesus.ffz import genContent from svjesus.elements.Base import Element class Descriptive(Element): def __init__(self): self.allowedChildren = () # TODO: Check what's allowed # Descriptive elements class Desc(Descriptive): name = "desc" attrs = () class Metadata(Descriptive): name = "metadata" attrs = () class Title(Descriptive): name = "title" attrs = ()
flexible
{ "blob_id": "178570047458eb3eeda00f9153ef2159eb4cbef3", "index": 9188, "step-1": "<mask token>\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n", "step-2": "<mask token>\n\n\nclass Descriptive(Element):\n <mask token>\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n", "step-3": "<mask token>\n\n\nclass Descriptive(Element):\n\n def __init__(self):\n self.allowedChildren = ()\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n", "step-4": "from svjesus.ffz import genContent\nfrom svjesus.elements.Base import Element\n\n\nclass Descriptive(Element):\n\n def __init__(self):\n self.allowedChildren = ()\n\n\nclass Desc(Descriptive):\n name = 'desc'\n attrs = ()\n\n\nclass Metadata(Descriptive):\n name = 'metadata'\n attrs = ()\n\n\nclass Title(Descriptive):\n name = 'title'\n attrs = ()\n", "step-5": "from svjesus.ffz import genContent\nfrom svjesus.elements.Base import Element\n\nclass Descriptive(Element):\n\tdef __init__(self):\n\t\tself.allowedChildren = () # TODO: Check what's allowed\n\n# Descriptive elements\nclass Desc(Descriptive):\n\tname = \"desc\"\n\tattrs = ()\n\nclass Metadata(Descriptive):\n\tname = \"metadata\"\n\tattrs = ()\n\nclass Title(Descriptive):\n\tname = \"title\"\n\tattrs = ()", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class RNN_instruction_encoder(nn.Module): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class RNN_instruction_encoder(nn.Module): def __init__(self, vocab_size, word_vec_dim, hidden_size, n_layers, input_dropout_p=0, dropout_p=0, bidirectional=True, variable_lengths=True, word2vec=None, fix_embeddings=False, rnn_cell='lstm'): super(RNN_instruction_encoder, self).__init__() assert rnn_cell in ['lstm', 'gru'] self.variable_lengths = variable_lengths if word2vec is not None: assert word2vec.size(0) == vocab_size self.word_vec_dim = word2vec.size(1) self.embedding = nn.Embedding(vocab_size, self.word_vec_dim) self.embedding.weight = nn.Parameter(word2vec) else: self.word_vec_dim = word_vec_dim self.embedding = nn.Embedding(vocab_size, word_vec_dim) if fix_embeddings: self.embedding.weight.requires_grad = False if rnn_cell == 'lstm': self.rnn = nn.LSTM(self.word_vec_dim, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout= dropout_p) elif rnn_cell == 'gru': self.rnn = nn.GRU(self.word_vec_dim, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout= dropout_p) self.input_dropout = nn.Dropout(p=input_dropout_p) self.bidirectional = bidirectional self.n_layers = n_layers self.hidden_size = hidden_size self.rnn_cell = rnn_cell def forward(self, input_seq, input_lengths=None): embedded = self.embedding(input_seq) embedded = self.input_dropout(embedded) if self.variable_lengths: embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True, enforce_sorted=False) output, hidden = self.rnn(embedded) if self.variable_lengths: output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True) return output, hidden <|reserved_special_token_1|> import torch.nn as nn class RNN_instruction_encoder(nn.Module): def __init__(self, vocab_size, word_vec_dim, hidden_size, n_layers, input_dropout_p=0, dropout_p=0, bidirectional=True, variable_lengths=True, word2vec=None, fix_embeddings=False, rnn_cell='lstm'): super(RNN_instruction_encoder, self).__init__() assert rnn_cell in ['lstm', 'gru'] self.variable_lengths = variable_lengths if word2vec is not None: assert word2vec.size(0) == vocab_size self.word_vec_dim = word2vec.size(1) self.embedding = nn.Embedding(vocab_size, self.word_vec_dim) self.embedding.weight = nn.Parameter(word2vec) else: self.word_vec_dim = word_vec_dim self.embedding = nn.Embedding(vocab_size, word_vec_dim) if fix_embeddings: self.embedding.weight.requires_grad = False if rnn_cell == 'lstm': self.rnn = nn.LSTM(self.word_vec_dim, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout= dropout_p) elif rnn_cell == 'gru': self.rnn = nn.GRU(self.word_vec_dim, hidden_size, n_layers, batch_first=True, bidirectional=bidirectional, dropout= dropout_p) self.input_dropout = nn.Dropout(p=input_dropout_p) self.bidirectional = bidirectional self.n_layers = n_layers self.hidden_size = hidden_size self.rnn_cell = rnn_cell def forward(self, input_seq, input_lengths=None): embedded = self.embedding(input_seq) embedded = self.input_dropout(embedded) if self.variable_lengths: embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True, enforce_sorted=False) output, hidden = self.rnn(embedded) if self.variable_lengths: output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True) return output, hidden
flexible
{ "blob_id": "16106250548ef60b475b009116cfeb7a25101637", "index": 7727, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass RNN_instruction_encoder(nn.Module):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass RNN_instruction_encoder(nn.Module):\n\n def __init__(self, vocab_size, word_vec_dim, hidden_size, n_layers,\n input_dropout_p=0, dropout_p=0, bidirectional=True,\n variable_lengths=True, word2vec=None, fix_embeddings=False,\n rnn_cell='lstm'):\n super(RNN_instruction_encoder, self).__init__()\n assert rnn_cell in ['lstm', 'gru']\n self.variable_lengths = variable_lengths\n if word2vec is not None:\n assert word2vec.size(0) == vocab_size\n self.word_vec_dim = word2vec.size(1)\n self.embedding = nn.Embedding(vocab_size, self.word_vec_dim)\n self.embedding.weight = nn.Parameter(word2vec)\n else:\n self.word_vec_dim = word_vec_dim\n self.embedding = nn.Embedding(vocab_size, word_vec_dim)\n if fix_embeddings:\n self.embedding.weight.requires_grad = False\n if rnn_cell == 'lstm':\n self.rnn = nn.LSTM(self.word_vec_dim, hidden_size, n_layers,\n batch_first=True, bidirectional=bidirectional, dropout=\n dropout_p)\n elif rnn_cell == 'gru':\n self.rnn = nn.GRU(self.word_vec_dim, hidden_size, n_layers,\n batch_first=True, bidirectional=bidirectional, dropout=\n dropout_p)\n self.input_dropout = nn.Dropout(p=input_dropout_p)\n self.bidirectional = bidirectional\n self.n_layers = n_layers\n self.hidden_size = hidden_size\n self.rnn_cell = rnn_cell\n\n def forward(self, input_seq, input_lengths=None):\n embedded = self.embedding(input_seq)\n embedded = self.input_dropout(embedded)\n if self.variable_lengths:\n embedded = nn.utils.rnn.pack_padded_sequence(embedded,\n input_lengths, batch_first=True, enforce_sorted=False)\n output, hidden = self.rnn(embedded)\n if self.variable_lengths:\n output, _ = nn.utils.rnn.pad_packed_sequence(output,\n batch_first=True)\n return output, hidden\n", "step-4": "import torch.nn as nn\n\n\nclass RNN_instruction_encoder(nn.Module):\n\n def __init__(self, vocab_size, word_vec_dim, hidden_size, n_layers,\n input_dropout_p=0, dropout_p=0, bidirectional=True,\n variable_lengths=True, word2vec=None, fix_embeddings=False,\n rnn_cell='lstm'):\n super(RNN_instruction_encoder, self).__init__()\n assert rnn_cell in ['lstm', 'gru']\n self.variable_lengths = variable_lengths\n if word2vec is not None:\n assert word2vec.size(0) == vocab_size\n self.word_vec_dim = word2vec.size(1)\n self.embedding = nn.Embedding(vocab_size, self.word_vec_dim)\n self.embedding.weight = nn.Parameter(word2vec)\n else:\n self.word_vec_dim = word_vec_dim\n self.embedding = nn.Embedding(vocab_size, word_vec_dim)\n if fix_embeddings:\n self.embedding.weight.requires_grad = False\n if rnn_cell == 'lstm':\n self.rnn = nn.LSTM(self.word_vec_dim, hidden_size, n_layers,\n batch_first=True, bidirectional=bidirectional, dropout=\n dropout_p)\n elif rnn_cell == 'gru':\n self.rnn = nn.GRU(self.word_vec_dim, hidden_size, n_layers,\n batch_first=True, bidirectional=bidirectional, dropout=\n dropout_p)\n self.input_dropout = nn.Dropout(p=input_dropout_p)\n self.bidirectional = bidirectional\n self.n_layers = n_layers\n self.hidden_size = hidden_size\n self.rnn_cell = rnn_cell\n\n def forward(self, input_seq, input_lengths=None):\n embedded = self.embedding(input_seq)\n embedded = self.input_dropout(embedded)\n if self.variable_lengths:\n embedded = nn.utils.rnn.pack_padded_sequence(embedded,\n input_lengths, batch_first=True, enforce_sorted=False)\n output, hidden = 
self.rnn(embedded)\n if self.variable_lengths:\n output, _ = nn.utils.rnn.pad_packed_sequence(output,\n batch_first=True)\n return output, hidden\n", "step-5": null, "step-ids": [ 0, 1, 3, 4 ] }
[ 0, 1, 3, 4 ]
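# Smoke-test sketch for the RNN_instruction_encoder defined above (assumes that
# class is in scope and PyTorch is installed; vocabulary size, dimensions, and
# the dummy batch are arbitrary choices for illustration).
import torch

encoder = RNN_instruction_encoder(vocab_size=100, word_vec_dim=32,
                                  hidden_size=64, n_layers=1)
tokens = torch.randint(0, 100, (4, 7))   # 4 padded token sequences, max length 7
lengths = torch.tensor([7, 5, 4, 2])     # true lengths; unsorted is fine (enforce_sorted=False)
output, hidden = encoder(tokens, lengths)
print(output.shape)                      # torch.Size([4, 7, 128]): 2 * hidden_size, bidirectional LSTM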
from aiogram import Dispatcher from create_bot import bot from data_base import sqlite_db # new user in group async def new_member(message): new_user = message.new_chat_members[0] user_id = new_user['id'] if new_user['username']: user_name = new_user['username'] elif new_user['first_name']: user_name = new_user['first_name'] elif new_user['last_name']: user_name = new_user['last_name'] else: user_name = 'Пользователь без имени' await sqlite_db.sql_add_user_to_db(user_id, user_name) await bot.send_message(message.chat.id, f'Добро пожаловать, {user_name}!\nКоманда - /start переход' f' в пользовательское меню.\nКоманда - /help помощь по командам бота.') # left user from group async def left_member(message): left_user = message.left_chat_member user_name = await sqlite_db.sql_get_user_name(left_user['id']) user_name = user_name[0][0] await sqlite_db.sql_del_user_from_db(left_user['id']) await bot.send_message(message.chat.id, f'Будем рады Вас видеть, {user_name}! Возвращайтесь!') def register_handlers_for_other(dp: Dispatcher): dp.register_message_handler(new_member, content_types=["new_chat_members"]) dp.register_message_handler(left_member, content_types=["left_chat_member"])
normal
{ "blob_id": "dfcfa4fa036fe8c058d66fc0b9ea73ddb9d4446e", "index": 7524, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=['new_chat_members'])\n dp.register_message_handler(left_member, content_types=['left_chat_member']\n )\n", "step-3": "<mask token>\n\n\nasync def new_member(message):\n new_user = message.new_chat_members[0]\n user_id = new_user['id']\n if new_user['username']:\n user_name = new_user['username']\n elif new_user['first_name']:\n user_name = new_user['first_name']\n elif new_user['last_name']:\n user_name = new_user['last_name']\n else:\n user_name = 'Пользователь без имени'\n await sqlite_db.sql_add_user_to_db(user_id, user_name)\n await bot.send_message(message.chat.id,\n f\"\"\"Добро пожаловать, {user_name}!\nКоманда - /start переход в пользовательское меню.\nКоманда - /help помощь по командам бота.\"\"\"\n )\n\n\nasync def left_member(message):\n left_user = message.left_chat_member\n user_name = await sqlite_db.sql_get_user_name(left_user['id'])\n user_name = user_name[0][0]\n await sqlite_db.sql_del_user_from_db(left_user['id'])\n await bot.send_message(message.chat.id,\n f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=['new_chat_members'])\n dp.register_message_handler(left_member, content_types=['left_chat_member']\n )\n", "step-4": "from aiogram import Dispatcher\nfrom create_bot import bot\nfrom data_base import sqlite_db\n\n\nasync def new_member(message):\n new_user = message.new_chat_members[0]\n user_id = new_user['id']\n if new_user['username']:\n user_name = new_user['username']\n elif new_user['first_name']:\n user_name = new_user['first_name']\n elif new_user['last_name']:\n user_name = new_user['last_name']\n else:\n user_name = 'Пользователь без имени'\n await sqlite_db.sql_add_user_to_db(user_id, user_name)\n await bot.send_message(message.chat.id,\n f\"\"\"Добро пожаловать, {user_name}!\nКоманда - /start переход в пользовательское меню.\nКоманда - /help помощь по командам бота.\"\"\"\n )\n\n\nasync def left_member(message):\n left_user = message.left_chat_member\n user_name = await sqlite_db.sql_get_user_name(left_user['id'])\n user_name = user_name[0][0]\n await sqlite_db.sql_del_user_from_db(left_user['id'])\n await bot.send_message(message.chat.id,\n f'Будем рады Вас видеть, {user_name}! 
Возвращайтесь!')\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=['new_chat_members'])\n dp.register_message_handler(left_member, content_types=['left_chat_member']\n )\n", "step-5": "from aiogram import Dispatcher\n\nfrom create_bot import bot\nfrom data_base import sqlite_db\n\n\n# new user in group\nasync def new_member(message):\n new_user = message.new_chat_members[0]\n user_id = new_user['id']\n if new_user['username']:\n user_name = new_user['username']\n elif new_user['first_name']:\n user_name = new_user['first_name']\n elif new_user['last_name']:\n user_name = new_user['last_name']\n else:\n user_name = 'Пользователь без имени'\n await sqlite_db.sql_add_user_to_db(user_id, user_name)\n await bot.send_message(message.chat.id, f'Добро пожаловать, {user_name}!\\nКоманда - /start переход'\n f' в пользовательское меню.\\nКоманда - /help помощь по командам бота.')\n\n\n# left user from group\nasync def left_member(message):\n left_user = message.left_chat_member\n user_name = await sqlite_db.sql_get_user_name(left_user['id'])\n user_name = user_name[0][0]\n await sqlite_db.sql_del_user_from_db(left_user['id'])\n await bot.send_message(message.chat.id, f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=[\"new_chat_members\"])\n dp.register_message_handler(left_member, content_types=[\"left_chat_member\"])\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
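A minimal wiring sketch for the handler module in the record above. Assumptions not in the record: aiogram 2.x, the module being importable as handlers_other, and the bot token arriving via a BOT_TOKEN environment variable.

# Wiring sketch, assuming aiogram 2.x; 'handlers_other' and BOT_TOKEN are assumed names.
import os
from aiogram import Bot, Dispatcher
from aiogram.utils import executor

if __name__ == '__main__' and os.getenv('BOT_TOKEN'):
    from handlers_other import register_handlers_for_other  # assumed module name
    bot = Bot(token=os.environ['BOT_TOKEN'])
    dp = Dispatcher(bot)
    register_handlers_for_other(dp)   # attaches the join/leave handlers from the record
    executor.start_polling(dp, skip_updates=True)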
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def compare(a): if a > 11: print('big') elif a == 10: print('reallybig') <|reserved_special_token_0|> <|reserved_special_token_1|> def drive(carspeed): if carspeed > 200: print('very fast') elif carspeed > 100: print('toofast') elif carspeed > 70 and carspeed < 80: print('optimal speed') else: print('below speed limit') <|reserved_special_token_0|> def compare(a): if a > 11: print('big') elif a == 10: print('reallybig') <|reserved_special_token_0|> <|reserved_special_token_1|> def drive(carspeed): if carspeed > 200: print('very fast') elif carspeed > 100: print('toofast') elif carspeed > 70 and carspeed < 80: print('optimal speed') else: print('below speed limit') print(drive(234)) print(drive(34)) drive(134) def compare(a): if a > 11: print('big') elif a == 10: print('reallybig') compare(10) <|reserved_special_token_1|> def drive(carspeed): if carspeed>200: print("very fast") elif carspeed>100: print("toofast") elif carspeed>70 and carspeed<80: print("optimal speed") else: print("below speed limit") print(drive(234)) print(drive(34)) drive(134) #how none will be removed? def compare(a): if a>11: print("big") elif a==10: print("reallybig") compare(10)
flexible
{ "blob_id": "de3eaa5823fb396050527c148273c30bed6ce8ca", "index": 2644, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef compare(a):\n if a > 11:\n print('big')\n elif a == 10:\n print('reallybig')\n\n\n<mask token>\n", "step-3": "def drive(carspeed):\n if carspeed > 200:\n print('very fast')\n elif carspeed > 100:\n print('toofast')\n elif carspeed > 70 and carspeed < 80:\n print('optimal speed')\n else:\n print('below speed limit')\n\n\n<mask token>\n\n\ndef compare(a):\n if a > 11:\n print('big')\n elif a == 10:\n print('reallybig')\n\n\n<mask token>\n", "step-4": "def drive(carspeed):\n if carspeed > 200:\n print('very fast')\n elif carspeed > 100:\n print('toofast')\n elif carspeed > 70 and carspeed < 80:\n print('optimal speed')\n else:\n print('below speed limit')\n\n\nprint(drive(234))\nprint(drive(34))\ndrive(134)\n\n\ndef compare(a):\n if a > 11:\n print('big')\n elif a == 10:\n print('reallybig')\n\n\ncompare(10)\n", "step-5": "\ndef drive(carspeed):\n\tif carspeed>200:\n\t\tprint(\"very fast\")\n\telif carspeed>100:\n\t\tprint(\"toofast\")\n\telif carspeed>70 and carspeed<80:\n\t\tprint(\"optimal speed\")\n\telse:\n\t\tprint(\"below speed limit\")\nprint(drive(234))\nprint(drive(34))\ndrive(134)\n#how none will be removed?\ndef compare(a):\n\tif a>11:\n\t\tprint(\"big\")\n\telif a==10:\n\t\tprint(\"reallybig\")\ncompare(10)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
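A side note on the record above: print(drive(234)) prints the message and then None, because drive only prints and returns nothing; the original source even asks "how none will be removed?". A minimal sketch (not part of the record) that returns the label instead of printing it:

# Return the label so print(drive(...)) no longer shows None.
def drive(carspeed):
    if carspeed > 200:
        return "very fast"
    elif carspeed > 100:
        return "too fast"
    elif 70 < carspeed < 80:
        return "optimal speed"
    else:
        return "below speed limit"

print(drive(234))  # very fast
print(drive(34))   # below speed limit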
import pandas as pd df1 = pd.read_csv('Tweets1.csv', names=['tweet']) df2 = pd.read_csv('Tweets2.csv', names=['tweet']) df3 = pd.read_csv('Tweets3.csv', names=['tweet']) df = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, copy=True) df.to_csv('Tweets.csv', index=None, header=None)
normal
{ "blob_id": "7d6196268b85861e76efaa53e14976f2eae09405", "index": 3226, "step-1": "<mask token>\n", "step-2": "<mask token>\ndf.to_csv('Tweets.csv', index=None, header=None)\n", "step-3": "<mask token>\ndf1 = pd.read_csv('Tweets1.csv', names=['tweet'])\ndf2 = pd.read_csv('Tweets2.csv', names=['tweet'])\ndf3 = pd.read_csv('Tweets3.csv', names=['tweet'])\ndf = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False, copy=True)\ndf.to_csv('Tweets.csv', index=None, header=None)\n", "step-4": "import pandas as pd\ndf1 = pd.read_csv('Tweets1.csv', names=['tweet'])\ndf2 = pd.read_csv('Tweets2.csv', names=['tweet'])\ndf3 = pd.read_csv('Tweets3.csv', names=['tweet'])\ndf = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False, copy=True)\ndf.to_csv('Tweets.csv', index=None, header=None)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
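A small sketch for the concatenation above, using in-memory frames instead of the CSV files, to show the index behaviour of pd.concat: the record keeps ignore_index=False, which preserves each frame's own index; ignore_index=True renumbers rows 0..n-1 before writing out.

# Index behaviour of pd.concat (frame contents are illustrative).
import pandas as pd

df1 = pd.DataFrame({'tweet': ['a', 'b']})
df2 = pd.DataFrame({'tweet': ['c', 'd']})

stacked = pd.concat([df1, df2], axis=0, ignore_index=False)
renumbered = pd.concat([df1, df2], axis=0, ignore_index=True)

print(list(stacked.index))     # [0, 1, 0, 1]
print(list(renumbered.index))  # [0, 1, 2, 3]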
#include os
#include math

output_file = 'output/mvnt'

def file_writeout(srvN, pos);
    with open(output_file, 'a') as f:
        f.write(srvN, ' to ', pos)
    return 0

class leg(legN):
    def __init__(legN):
        srvHY = 'srv' + legN + 'HY'
        srvHX = 'srv' + legN + 'HX'
        srvEY = 'srv' + legN + 'EY'
normal
{ "blob_id": "901f87752026673c41a70655e987ecc2d5cb369f", "index": 7273, "step-1": "#include os\n#include math\n\noutput_file = 'output/mvnt'\n\ndef file_writeout(srvN, pos);\n with open(output_file, 'a') as f:\n f.write(srvN, ' to ', pos)\n return 0\n \nclass leg(legN):\n def __init__(legN):\n srvHY = 'srv' + legN + 'HY'\n srvHX = 'srv' + legN + 'HX'\n srvEY = 'srv' + legN + 'EY'\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
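The fragment in the record above is not valid Python as written: the C-style #include lines, the ';' after the def header, f.write called with several positional arguments, and a class inheriting from an undefined legN. A hedged sketch of a runnable equivalent, keeping the record's servo-naming scheme; the log-line format and constructor signature are assumptions.

# Runnable rework of the fragment above (assumptions: one movement entry per line,
# leg number passed to the constructor as a string such as '1').
import os

OUTPUT_FILE = 'output/mvnt'

def file_writeout(srv_name, pos):
    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)
    with open(OUTPUT_FILE, 'a') as f:
        f.write('{} to {}\n'.format(srv_name, pos))

class Leg:
    def __init__(self, leg_number):
        # Servo names follow the record's scheme: hip Y, hip X, elbow Y.
        self.srv_hy = 'srv' + leg_number + 'HY'
        self.srv_hx = 'srv' + leg_number + 'HX'
        self.srv_ey = 'srv' + leg_number + 'EY'

if __name__ == '__main__':
    file_writeout(Leg('1').srv_hy, 90)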
<|reserved_special_token_0|> def get_choice(attempt): """ return an integer input from the user """ try: user_text = '' if attempt == 1: user_text = 'Guess a number between 0 and 99:' choice = int(input(user_text)) except ValueError: return get_choice() return choice def get_random(): K_HIGH = 99 K_LOW = 0 return random.randint(K_LOW, K_HIGH) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_choice(attempt): """ return an integer input from the user """ try: user_text = '' if attempt == 1: user_text = 'Guess a number between 0 and 99:' choice = int(input(user_text)) except ValueError: return get_choice() return choice def get_random(): K_HIGH = 99 K_LOW = 0 return random.randint(K_LOW, K_HIGH) <|reserved_special_token_0|> while choice != rand: attempt += 1 choice = get_choice(attempt) if choice > rand: print('Too high. Guess again:', end='') elif choice < rand: print('Too low. Guess again:', end='') else: print('Correct. It took you {0} guesses.'.format(attempt)) <|reserved_special_token_1|> <|reserved_special_token_0|> def get_choice(attempt): """ return an integer input from the user """ try: user_text = '' if attempt == 1: user_text = 'Guess a number between 0 and 99:' choice = int(input(user_text)) except ValueError: return get_choice() return choice def get_random(): K_HIGH = 99 K_LOW = 0 return random.randint(K_LOW, K_HIGH) choice = 0 rand = get_random() attempt = 0 while choice != rand: attempt += 1 choice = get_choice(attempt) if choice > rand: print('Too high. Guess again:', end='') elif choice < rand: print('Too low. Guess again:', end='') else: print('Correct. It took you {0} guesses.'.format(attempt)) <|reserved_special_token_1|> <|reserved_special_token_0|> import random def get_choice(attempt): """ return an integer input from the user """ try: user_text = '' if attempt == 1: user_text = 'Guess a number between 0 and 99:' choice = int(input(user_text)) except ValueError: return get_choice() return choice def get_random(): K_HIGH = 99 K_LOW = 0 return random.randint(K_LOW, K_HIGH) choice = 0 rand = get_random() attempt = 0 while choice != rand: attempt += 1 choice = get_choice(attempt) if choice > rand: print('Too high. Guess again:', end='') elif choice < rand: print('Too low. Guess again:', end='') else: print('Correct. It took you {0} guesses.'.format(attempt)) <|reserved_special_token_1|> """ ********************************************************************* * Project : POP1 (Practical Exam) * Program name : q2.py * Author : varunk01 * Purpose : Attempts to solve the question 2 from the exam paper * Date created : 28/05/2018 * * Date Author Ver Comment * 28/05/2018 varunk01 0.1 Initial Version ********************************************************************** Write a program for a number guessing game. The program generates a random number between 0 and 99, and then asks the user to guess that number. For each guess the program replies Correct, Too low, or Too high. If the number is correct, the program prints the number of guesses it took. If not, the program asks the user to guess again. For example: Guess a number between 0 and 99: 50 Too low. Guess again: 75 Too high. Guess again: 60 Too high. Guess again: 54 Correct. It took you 4 guesses. 
""" import random def get_choice(attempt): """ return an integer input from the user """ try: user_text='' if attempt ==1: user_text ='Guess a number between 0 and 99:' choice = int(input(user_text)) except ValueError: return get_choice() return choice def get_random(): K_HIGH =99 K_LOW =0 return random.randint(K_LOW,K_HIGH) choice =0 rand = get_random() attempt =0 while (choice != rand): attempt += 1 choice =get_choice(attempt) if choice > rand: print('Too high. Guess again:',end='') elif choice < rand: print('Too low. Guess again:',end='') else: print('Correct. It took you {0} guesses.'.format(attempt)) #if __name__ == '__main__':
flexible
{ "blob_id": "f7d487ec99e2fa901677ab9aec0760a396722e12", "index": 8245, "step-1": "<mask token>\n\n\ndef get_choice(attempt):\n \"\"\"\n return an integer input from the user\n \"\"\"\n try:\n user_text = ''\n if attempt == 1:\n user_text = 'Guess a number between 0 and 99:'\n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice\n\n\ndef get_random():\n K_HIGH = 99\n K_LOW = 0\n return random.randint(K_LOW, K_HIGH)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_choice(attempt):\n \"\"\"\n return an integer input from the user\n \"\"\"\n try:\n user_text = ''\n if attempt == 1:\n user_text = 'Guess a number between 0 and 99:'\n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice\n\n\ndef get_random():\n K_HIGH = 99\n K_LOW = 0\n return random.randint(K_LOW, K_HIGH)\n\n\n<mask token>\nwhile choice != rand:\n attempt += 1\n choice = get_choice(attempt)\n if choice > rand:\n print('Too high. Guess again:', end='')\n elif choice < rand:\n print('Too low. Guess again:', end='')\n else:\n print('Correct. It took you {0} guesses.'.format(attempt))\n", "step-3": "<mask token>\n\n\ndef get_choice(attempt):\n \"\"\"\n return an integer input from the user\n \"\"\"\n try:\n user_text = ''\n if attempt == 1:\n user_text = 'Guess a number between 0 and 99:'\n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice\n\n\ndef get_random():\n K_HIGH = 99\n K_LOW = 0\n return random.randint(K_LOW, K_HIGH)\n\n\nchoice = 0\nrand = get_random()\nattempt = 0\nwhile choice != rand:\n attempt += 1\n choice = get_choice(attempt)\n if choice > rand:\n print('Too high. Guess again:', end='')\n elif choice < rand:\n print('Too low. Guess again:', end='')\n else:\n print('Correct. It took you {0} guesses.'.format(attempt))\n", "step-4": "<mask token>\nimport random\n\n\ndef get_choice(attempt):\n \"\"\"\n return an integer input from the user\n \"\"\"\n try:\n user_text = ''\n if attempt == 1:\n user_text = 'Guess a number between 0 and 99:'\n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice\n\n\ndef get_random():\n K_HIGH = 99\n K_LOW = 0\n return random.randint(K_LOW, K_HIGH)\n\n\nchoice = 0\nrand = get_random()\nattempt = 0\nwhile choice != rand:\n attempt += 1\n choice = get_choice(attempt)\n if choice > rand:\n print('Too high. Guess again:', end='')\n elif choice < rand:\n print('Too low. Guess again:', end='')\n else:\n print('Correct. It took you {0} guesses.'.format(attempt))\n", "step-5": "\"\"\"\n*********************************************************************\n* Project : POP1 (Practical Exam)\n* Program name : q2.py\n* Author : varunk01\n* Purpose : Attempts to solve the question 2 from the exam paper\n* Date created : 28/05/2018\n*\n* Date Author Ver Comment\n* 28/05/2018 varunk01 0.1 Initial Version\n**********************************************************************\nWrite a program for a number guessing game. The program generates a random\nnumber between 0 and 99, and then asks the user to guess that number. For\neach guess the program replies Correct, Too low, or Too high. If the number\nis correct, the program prints the number of guesses it took. If not, the program\nasks the user to guess again. For example:\nGuess a number between 0 and 99: 50\nToo low. Guess again: 75\nToo high. Guess again: 60\nToo high. Guess again: 54\nCorrect. 
It took you 4 guesses.\n\"\"\"\n\nimport random\n\ndef get_choice(attempt):\n \"\"\"\n return an integer input from the user\n \"\"\"\n try:\n user_text=''\n\n if attempt ==1:\n user_text ='Guess a number between 0 and 99:'\n \n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice\n\ndef get_random():\n K_HIGH =99\n K_LOW =0\n return random.randint(K_LOW,K_HIGH)\n\nchoice =0\nrand = get_random()\nattempt =0\n\nwhile (choice != rand):\n attempt += 1\n choice =get_choice(attempt)\n \n if choice > rand:\n print('Too high. Guess again:',end='')\n elif choice < rand:\n print('Too low. Guess again:',end='')\n else:\n print('Correct. It took you {0} guesses.'.format(attempt))\n\n\n#if __name__ == '__main__':\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
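Since the game in the record above reports the number of guesses, a small companion sketch of the optimal strategy: bisecting the 0-99 range finds any number in at most 7 guesses (ceil(log2(100)) = 7). The helper plays against a fixed target instead of user input.

# Bisection guesser for the 0..99 game: never needs more than 7 guesses.
def auto_play(target, low=0, high=99):
    guesses = 0
    while True:
        guesses += 1
        guess = (low + high) // 2
        if guess == target:
            return guesses
        elif guess < target:
            low = guess + 1
        else:
            high = guess - 1

print(max(auto_play(t) for t in range(100)))  # 7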
import numpy as np
import dxchange
import ptychotomo

if __name__ == "__main__":

    # read object
    u = dxchange.read_tiff('data/init_object.tiff')
    u = u+1j*u/2

    nz, n, _ = u.shape

    # parameters
    center = n/2
    ntheta = 384
    ne = 3*n//2
    ngpus = 1
    pnz = nz//2
    theta = np.linspace(0, 4*np.pi, ntheta).astype('float32')

    # simulate data
    with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus) as tslv:
        data = tslv.fwd_tomo_batch(u)

    # adjoint test with data padding
    with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center+(ne-n)/2, ngpus) as tslv:
        data = ptychotomo.utils.paddata(data, ne)
        ua = tslv.adj_tomo_batch(data)
        ua = ptychotomo.utils.unpadobject(ua, n)

    print(f'norm data = {np.linalg.norm(data)}')
    print(f'norm object = {np.linalg.norm(ua)}')
    print(
        f'<u,R*Ru>=<Ru,Ru>: {np.sum(u*np.conj(ua)):e} ? {np.sum(data*np.conj(data)):e}')
normal
{ "blob_id": "4ed6f4db4c9c3319d6289ba402f81bbd8accf915", "index": 9782, "step-1": "<mask token>\n", "step-2": "<mask token>\nif __name__ == '__main__':\n u = dxchange.read_tiff('data/init_object.tiff')\n u = u + 1.0j * u / 2\n nz, n, _ = u.shape\n center = n / 2\n ntheta = 384\n ne = 3 * n // 2\n ngpus = 1\n pnz = nz // 2\n theta = np.linspace(0, 4 * np.pi, ntheta).astype('float32')\n with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus\n ) as tslv:\n data = tslv.fwd_tomo_batch(u)\n with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center + (ne - n\n ) / 2, ngpus) as tslv:\n data = ptychotomo.utils.paddata(data, ne)\n ua = tslv.adj_tomo_batch(data)\n ua = ptychotomo.utils.unpadobject(ua, n)\n print(f'norm data = {np.linalg.norm(data)}')\n print(f'norm object = {np.linalg.norm(ua)}')\n print(\n f'<u,R*Ru>=<Ru,Ru>: {np.sum(u * np.conj(ua)):e} ? {np.sum(data * np.conj(data)):e}'\n )\n", "step-3": "import numpy as np\nimport dxchange\nimport ptychotomo\nif __name__ == '__main__':\n u = dxchange.read_tiff('data/init_object.tiff')\n u = u + 1.0j * u / 2\n nz, n, _ = u.shape\n center = n / 2\n ntheta = 384\n ne = 3 * n // 2\n ngpus = 1\n pnz = nz // 2\n theta = np.linspace(0, 4 * np.pi, ntheta).astype('float32')\n with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus\n ) as tslv:\n data = tslv.fwd_tomo_batch(u)\n with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center + (ne - n\n ) / 2, ngpus) as tslv:\n data = ptychotomo.utils.paddata(data, ne)\n ua = tslv.adj_tomo_batch(data)\n ua = ptychotomo.utils.unpadobject(ua, n)\n print(f'norm data = {np.linalg.norm(data)}')\n print(f'norm object = {np.linalg.norm(ua)}')\n print(\n f'<u,R*Ru>=<Ru,Ru>: {np.sum(u * np.conj(ua)):e} ? {np.sum(data * np.conj(data)):e}'\n )\n", "step-4": "import numpy as np\nimport dxchange\nimport ptychotomo\n\nif __name__ == \"__main__\":\n \n # read object\n u = dxchange.read_tiff('data/init_object.tiff')\n u = u+1j*u/2\n\n nz, n, _ = u.shape\n\n # parameters\n center = n/2\n ntheta = 384\n ne = 3*n//2\n ngpus = 1\n pnz = nz//2\n theta = np.linspace(0, 4*np.pi, ntheta).astype('float32')\n\n # simulate data\n with ptychotomo.SolverTomo(theta, ntheta, nz, n, pnz, center, ngpus) as tslv:\n data = tslv.fwd_tomo_batch(u)\n\n # adjoint test with data padding\n with ptychotomo.SolverTomo(theta, ntheta, nz, ne, pnz, center+(ne-n)/2, ngpus) as tslv:\n data = ptychotomo.utils.paddata(data, ne)\n ua = tslv.adj_tomo_batch(data)\n ua = ptychotomo.utils.unpadobject(ua, n)\n\n print(f'norm data = {np.linalg.norm(data)}')\n print(f'norm object = {np.linalg.norm(ua)}')\n print(\n f'<u,R*Ru>=<Ru,Ru>: {np.sum(u*np.conj(ua)):e} ? {np.sum(data*np.conj(data)):e}')\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
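The record above is an adjoint (dot-product) test: for a forward operator R it compares <u, R*Ru> with <Ru, Ru>. A dependency-free sketch of the same check for a plain complex matrix, where the adjoint is simply the conjugate transpose; it mirrors the inner-product convention used in the record.

# Dot-product test <x, A^H A x> == <A x, A x> for a random complex matrix.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 4)) + 1j * rng.standard_normal((6, 4))
x = rng.standard_normal(4) + 1j * rng.standard_normal(4)

y = A @ x               # forward
x_adj = A.conj().T @ y  # adjoint applied to the data

lhs = np.sum(x * np.conj(x_adj))  # <x, A^H A x>
rhs = np.sum(y * np.conj(y))      # <A x, A x>
print(np.allclose(lhs, rhs))      # True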
#!/usr/bin/env python3
#
# nextskeleton - An assembler skeleton for the ZX Spectrum Next
#
# Copyright (C) 2020 Richard "Shred" Körber
# https://github.com/shred/nextskeleton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import struct
import sys

parser = argparse.ArgumentParser(description='Generate an autoexec.bas that launches a .nex file')
parser.add_argument('nex',
                    help='path of the .nex file to be launched')
parser.add_argument('file',
                    help='autoexec.bas file to be generated')
args = parser.parse_args()

command = '.nexload ' + args.nex + '\r'

contents = bytearray(128)
contents[0:8] = 'PLUS3DOS'.encode('ASCII')           # +3DOS signature
contents[8] = 0x1A
contents[9:11] = [0x01, 0x00]                        # Issue and Version

contents += bytearray((0x00, 0x0A))                  # Line number 10
contents += struct.pack('<H', len(command))          # Line length
contents += command.encode('ASCII')                  # BASIC line
programLength = len(contents) - 128                  # Length of the BASIC program

contents[15] = 0x00                                  # DOS header: PROGRAM
contents[16:18] = struct.pack('<H', programLength)   # DOS header: length
contents[18:20] = struct.pack('<H', 10)              # DOS header: run at line 10
contents[20:22] = struct.pack('<H', programLength)   # DOS header: offset to prog
contents[11:15] = struct.pack('<L', len(contents))   # Set total length
contents[127] = sum(contents[0:126]) & 0xFF          # Compute checksum

with open(args.file, 'wb') as f:
    f.write(contents)
normal
{ "blob_id": "0744ec646e7b9303c67c25dff2997568c6171b91", "index": 108, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('nex', help='path of the .nex file to be launched')\nparser.add_argument('file', help='autoexec.bas file to be generated')\n<mask token>\ncontents += bytearray((0, 10))\ncontents += struct.pack('<H', len(command))\ncontents += command.encode('ASCII')\n<mask token>\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-3": "<mask token>\nparser = argparse.ArgumentParser(description=\n 'Generate an autoexec.bas that launches a .nex file')\nparser.add_argument('nex', help='path of the .nex file to be launched')\nparser.add_argument('file', help='autoexec.bas file to be generated')\nargs = parser.parse_args()\ncommand = '.nexload ' + args.nex + '\\r'\ncontents = bytearray(128)\ncontents[0:8] = 'PLUS3DOS'.encode('ASCII')\ncontents[8] = 26\ncontents[9:11] = [1, 0]\ncontents += bytearray((0, 10))\ncontents += struct.pack('<H', len(command))\ncontents += command.encode('ASCII')\nprogramLength = len(contents) - 128\ncontents[15] = 0\ncontents[16:18] = struct.pack('<H', programLength)\ncontents[18:20] = struct.pack('<H', 10)\ncontents[20:22] = struct.pack('<H', programLength)\ncontents[11:15] = struct.pack('<L', len(contents))\ncontents[127] = sum(contents[0:126]) & 255\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-4": "import argparse\nimport struct\nimport sys\nparser = argparse.ArgumentParser(description=\n 'Generate an autoexec.bas that launches a .nex file')\nparser.add_argument('nex', help='path of the .nex file to be launched')\nparser.add_argument('file', help='autoexec.bas file to be generated')\nargs = parser.parse_args()\ncommand = '.nexload ' + args.nex + '\\r'\ncontents = bytearray(128)\ncontents[0:8] = 'PLUS3DOS'.encode('ASCII')\ncontents[8] = 26\ncontents[9:11] = [1, 0]\ncontents += bytearray((0, 10))\ncontents += struct.pack('<H', len(command))\ncontents += command.encode('ASCII')\nprogramLength = len(contents) - 128\ncontents[15] = 0\ncontents[16:18] = struct.pack('<H', programLength)\ncontents[18:20] = struct.pack('<H', 10)\ncontents[20:22] = struct.pack('<H', programLength)\ncontents[11:15] = struct.pack('<L', len(contents))\ncontents[127] = sum(contents[0:126]) & 255\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-5": "#!/usr/bin/env python3\n#\n# nextskeleton - An assembler skeleton for the ZX Spectrum Next\n#\n# Copyright (C) 2020 Richard \"Shred\" Körber\n# https://github.com/shred/nextskeleton\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport struct\nimport sys\n\nparser = argparse.ArgumentParser(description='Generate an autoexec.bas that launches a .nex file')\nparser.add_argument('nex',\n help='path of the .nex file to be launched')\nparser.add_argument('file',\n help='autoexec.bas file to be generated')\nargs = parser.parse_args()\n\ncommand = '.nexload ' + args.nex + '\\r'\n\ncontents = bytearray(128)\ncontents[0:8] = 'PLUS3DOS'.encode('ASCII') # +3DOS 
signature\ncontents[8] = 0x1A\ncontents[9:11] = [0x01, 0x00] # Issue and Version\n\ncontents += bytearray((0x00, 0x0A)) # Line number 10\ncontents += struct.pack('<H', len(command)) # Line length\ncontents += command.encode('ASCII') # BASIC line\nprogramLength = len(contents) - 128 # Length of the BASIC program\n\ncontents[15] = 0x00 # DOS header: PROGRAM\ncontents[16:18] = struct.pack('<H', programLength) # DOS header: length\ncontents[18:20] = struct.pack('<H', 10) # DOS header: run at line 10\ncontents[20:22] = struct.pack('<H', programLength) # DOS header: offset to prog\ncontents[11:15] = struct.pack('<L', len(contents)) # Set total length\ncontents[127] = sum(contents[0:126]) & 0xFF # Compute checksum\n\nwith open(args.file, 'wb') as f:\n f.write(contents)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
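A small companion sketch for the generator above: it re-reads a generated file and checks the signature, the stored total length, and the checksum byte, mirroring exactly the fields the script writes (the checksum sums bytes 0-125, as in the record). The file name is an assumption.

# Verify a +3DOS header produced by the generator above.
import struct

def check_header(path):
    with open(path, 'rb') as f:
        data = f.read()
    ok_signature = data[0:8] == b'PLUS3DOS'
    total_length = struct.unpack('<L', data[11:15])[0]
    ok_length = total_length == len(data)
    ok_checksum = data[127] == (sum(data[0:126]) & 0xFF)
    return ok_signature and ok_length and ok_checksum

# print(check_header('autoexec.bas'))  # assumed output file name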
<|reserved_special_token_0|> def geoDistance(p1, p2): return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12'] <|reserved_special_token_0|> def compare(f): return geoDistance(f.getLocation(), melbourne) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def geoDistance(p1, p2): return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12'] <|reserved_special_token_0|> def compare(f): return geoDistance(f.getLocation(), melbourne) for i in sorted(VicEmergency.getItems(), key=compare): print(i.properties['sourceTitle']) print(i.properties['category1']) print(i.properties['location']) print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000)) print('============================') <|reserved_special_token_1|> <|reserved_special_token_0|> def geoDistance(p1, p2): return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12'] melbourne = Point(144.962272, -37.812274) def compare(f): return geoDistance(f.getLocation(), melbourne) for i in sorted(VicEmergency.getItems(), key=compare): print(i.properties['sourceTitle']) print(i.properties['category1']) print(i.properties['location']) print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000)) print('============================') <|reserved_special_token_1|> from context import vicemergencyapi from vicemergencyapi.vicemergency import VicEmergency from geographiclib.geodesic import Geodesic from shapely.geometry import Point def geoDistance(p1, p2): return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12'] melbourne = Point(144.962272, -37.812274) def compare(f): return geoDistance(f.getLocation(), melbourne) for i in sorted(VicEmergency.getItems(), key=compare): print(i.properties['sourceTitle']) print(i.properties['category1']) print(i.properties['location']) print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000)) print('============================') <|reserved_special_token_1|> from context import vicemergencyapi from vicemergencyapi.vicemergency import VicEmergency from geographiclib.geodesic import Geodesic from shapely.geometry import Point def geoDistance(p1, p2): return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12'] melbourne = Point(144.962272, -37.812274) def compare(f): return geoDistance(f.getLocation(), melbourne) for i in sorted(VicEmergency.getItems(), key=compare): print(i.properties["sourceTitle"]) print(i.properties["category1"]) print(i.properties["location"]) print("{:.0f}km".format(geoDistance(i.getLocation(), melbourne) / 1000)) print("============================")
flexible
{ "blob_id": "920f00632599945397364dd0f52f21234e17f9ef", "index": 9445, "step-1": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\n<mask token>\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\n<mask token>\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n", "step-3": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n", "step-4": "from context import vicemergencyapi\nfrom vicemergencyapi.vicemergency import VicEmergency\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.geometry import Point\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n", "step-5": "from context import vicemergencyapi\nfrom vicemergencyapi.vicemergency import VicEmergency\n\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.geometry import Point\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n\n print(i.properties[\"sourceTitle\"])\n print(i.properties[\"category1\"])\n print(i.properties[\"location\"])\n print(\"{:.0f}km\".format(geoDistance(i.getLocation(), melbourne) / 1000))\n\n print(\"============================\")\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
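The distance helper in the record above relies on geographiclib's inverse geodesic solution. A standalone sketch with two fixed coordinates (Melbourne and an approximate Sydney, used here only for illustration) showing the same call pattern without shapely:

# Same geoDistance pattern as the record: Inverse takes (lat1, lon1, lat2, lon2)
# and 's12' is the geodesic distance in metres.
from geographiclib.geodesic import Geodesic

melbourne = (-37.812274, 144.962272)  # (lat, lon), from the record
sydney = (-33.868820, 151.209296)     # approximate

metres = Geodesic.WGS84.Inverse(melbourne[0], melbourne[1],
                                sydney[0], sydney[1])['s12']
print('{:.0f} km'.format(metres / 1000))  # roughly 710 km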
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT ) <|reserved_special_token_1|> <|reserved_special_token_0|> urlpatterns = [path('user', include('user.urls')), path('order', include( 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path( 'product', include('product.urls')), path('', include('home.urls')), path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls), path('ckeditor', include('ckeditor_uploader.urls')), path('about/', views.about, name='about'), path('contact/', views.contact, name= 'about'), path('search/', views.search, name='search'), path( 'search_auto', views.search_auto, name='search_auto'), path( 'category/<int:id>/<slug:slug>/', views.category_products, name= 'category_products'), path('product/<int:id>/<slug:slug>/', views. product_detail, name='product_detail'), path('lic/', views.lic, name= 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/', views.post_detail, name='post_detail'), path('lic/<int:id>/', views. lic_detail, name='lic_detail')] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT ) <|reserved_special_token_1|> from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from home import views from order import views as OV urlpatterns = [path('user', include('user.urls')), path('order', include( 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path( 'product', include('product.urls')), path('', include('home.urls')), path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls), path('ckeditor', include('ckeditor_uploader.urls')), path('about/', views.about, name='about'), path('contact/', views.contact, name= 'about'), path('search/', views.search, name='search'), path( 'search_auto', views.search_auto, name='search_auto'), path( 'category/<int:id>/<slug:slug>/', views.category_products, name= 'category_products'), path('product/<int:id>/<slug:slug>/', views. product_detail, name='product_detail'), path('lic/', views.lic, name= 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/', views.post_detail, name='post_detail'), path('lic/<int:id>/', views. 
lic_detail, name='lic_detail')] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT ) <|reserved_special_token_1|> from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from home import views from order import views as OV urlpatterns = [ path('user', include('user.urls')), path('order', include('order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path('product',include('product.urls')), path('',include('home.urls')),# '' - bu home path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls), path('ckeditor', include('ckeditor_uploader.urls')), path('about/', views.about, name='about'), path('contact/', views.contact, name='about'), path('search/', views.search,name='search'), path('search_auto', views.search_auto, name='search_auto'), path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'), path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'), path('lic/',views.lic,name='lic'), path('post/',views.post,name='post'), path('post/<int:id>/',views.post_detail, name='post_detail'), path('lic/<int:id>/',views.lic_detail, name='lic_detail'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
flexible
{ "blob_id": "97cc29e0d54e5d5e05dff16c92ecc4046363185f", "index": 344, "step-1": "<mask token>\n", "step-2": "<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "step-3": "<mask token>\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "step-4": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\nurlpatterns = [path('user', include('user.urls')), path('order', include(\n 'order.urls')), path('shopcart/', OV.shopcart, name='shopcart'), path(\n 'product', include('product.urls')), path('', include('home.urls')),\n path('faq/', views.faq, name='faq'), path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')), path('about/',\n views.about, name='about'), path('contact/', views.contact, name=\n 'about'), path('search/', views.search, name='search'), path(\n 'search_auto', views.search_auto, name='search_auto'), path(\n 'category/<int:id>/<slug:slug>/', views.category_products, name=\n 'category_products'), path('product/<int:id>/<slug:slug>/', views.\n product_detail, name='product_detail'), path('lic/', views.lic, name=\n 'lic'), path('post/', views.post, name='post'), path('post/<int:id>/',\n views.post_detail, name='post_detail'), path('lic/<int:id>/', views.\n lic_detail, name='lic_detail')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n", "step-5": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom home import views\nfrom order import views as OV\n\nurlpatterns = [\n path('user', include('user.urls')),\n path('order', include('order.urls')),\n path('shopcart/', OV.shopcart, name='shopcart'),\n path('product',include('product.urls')),\n path('',include('home.urls')),# '' - bu home\n path('faq/', views.faq, name='faq'),\n path('admin/', admin.site.urls),\n path('ckeditor', include('ckeditor_uploader.urls')),\n path('about/', views.about, name='about'),\n path('contact/', views.contact, name='about'),\n path('search/', views.search,name='search'),\n path('search_auto', views.search_auto, name='search_auto'),\n path('category/<int:id>/<slug:slug>/', views.category_products, name='category_products'),\n path('product/<int:id>/<slug:slug>/',views.product_detail, name='product_detail'),\n 
path('lic/',views.lic,name='lic'),\n path('post/',views.post,name='post'),\n path('post/<int:id>/',views.post_detail, name='post_detail'),\n path('lic/<int:id>/',views.lic_detail, name='lic_detail'),\n\n\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def test_mongo_logging_client_persists_log(): """ Test to see if the mongodb client logger can persist a log entry to the database """ error_message = 'This is a test message.' logger = LoggingService(console_output=True) result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message)) logger.log(LogEntry(LogLevel.WARN, __name__, error_message)) logger.log(LogEntry(LogLevel.INFO, __name__, error_message)) logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message)) assert result.message == error_message <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def test_mongo_logging_client_persists_log(): """ Test to see if the mongodb client logger can persist a log entry to the database """ error_message = 'This is a test message.' logger = LoggingService(console_output=True) result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message)) logger.log(LogEntry(LogLevel.WARN, __name__, error_message)) logger.log(LogEntry(LogLevel.INFO, __name__, error_message)) logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message)) assert result.message == error_message def tests_teardown(): drop_all_collections() teardown() <|reserved_special_token_1|> <|reserved_special_token_0|> @pytest.fixture(autouse=True) def setup(): register_test_db() register_test_injections() def test_mongo_logging_client_persists_log(): """ Test to see if the mongodb client logger can persist a log entry to the database """ error_message = 'This is a test message.' logger = LoggingService(console_output=True) result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message)) logger.log(LogEntry(LogLevel.WARN, __name__, error_message)) logger.log(LogEntry(LogLevel.INFO, __name__, error_message)) logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message)) assert result.message == error_message def tests_teardown(): drop_all_collections() teardown() <|reserved_special_token_1|> from modules.core.logging.logging_service import LoggingService from modules.core.logging.models import LogLevel, LogEntry import pytest from .setup import register_test_db, register_test_injections, teardown, drop_all_collections @pytest.fixture(autouse=True) def setup(): register_test_db() register_test_injections() def test_mongo_logging_client_persists_log(): """ Test to see if the mongodb client logger can persist a log entry to the database """ error_message = 'This is a test message.' logger = LoggingService(console_output=True) result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message)) logger.log(LogEntry(LogLevel.WARN, __name__, error_message)) logger.log(LogEntry(LogLevel.INFO, __name__, error_message)) logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message)) assert result.message == error_message def tests_teardown(): drop_all_collections() teardown() <|reserved_special_token_1|> from modules.core.logging.logging_service import LoggingService from modules.core.logging.models import LogLevel, LogEntry import pytest from .setup import register_test_db, register_test_injections, teardown,\ drop_all_collections @pytest.fixture(autouse=True) def setup(): register_test_db() register_test_injections() def test_mongo_logging_client_persists_log(): """ Test to see if the mongodb client logger can persist a log entry to the database """ error_message = "This is a test message." 
logger = LoggingService(console_output=True) result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message)) logger.log(LogEntry(LogLevel.WARN, __name__, error_message)) logger.log(LogEntry(LogLevel.INFO, __name__, error_message)) logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message)) assert result.message == error_message def tests_teardown(): drop_all_collections() teardown()
flexible
{ "blob_id": "a29cf9e7006d52cea8f5ccdcbc2087983ffa3ef3", "index": 2973, "step-1": "<mask token>\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n", "step-3": "<mask token>\n\n\[email protected](autouse=True)\ndef setup():\n register_test_db()\n register_test_injections()\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n", "step-4": "from modules.core.logging.logging_service import LoggingService\nfrom modules.core.logging.models import LogLevel, LogEntry\nimport pytest\nfrom .setup import register_test_db, register_test_injections, teardown, drop_all_collections\n\n\[email protected](autouse=True)\ndef setup():\n register_test_db()\n register_test_injections()\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n error_message = 'This is a test message.'\n logger = LoggingService(console_output=True)\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n", "step-5": "from modules.core.logging.logging_service import LoggingService\nfrom modules.core.logging.models import LogLevel, LogEntry\nimport pytest\nfrom .setup import register_test_db, register_test_injections, teardown,\\\n drop_all_collections\n\n\[email protected](autouse=True)\ndef setup():\n register_test_db()\n register_test_injections()\n\n\ndef test_mongo_logging_client_persists_log():\n \"\"\"\n Test to see if the mongodb client logger\n can persist a log entry to the database\n \"\"\"\n\n error_message = \"This is a test 
message.\"\n logger = LoggingService(console_output=True)\n\n result = logger.log(LogEntry(LogLevel.ERROR, __name__, error_message))\n logger.log(LogEntry(LogLevel.WARN, __name__, error_message))\n logger.log(LogEntry(LogLevel.INFO, __name__, error_message))\n logger.log(LogEntry(LogLevel.DEBUG, __name__, error_message))\n\n assert result.message == error_message\n\n\ndef tests_teardown():\n drop_all_collections()\n teardown()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class AdmiravelMundoNovo(object): <|reserved_special_token_0|> <|reserved_special_token_0|> def transicao_estado(self, acao): if self._valor_estado == 2 and acao == 0: self._estado_6() elif self._valor_estado == 2 and acao == 1: self._estado_3() elif self._valor_estado in [1, 3, 4] and acao == 0: self._estado_2() elif self._valor_estado == 3 and acao == 1: self._estado_5() elif self._valor_estado == 2 and acao == 2: self._estado_4() elif self._valor_estado == 5 and acao == 1: self._estado_3() elif self._valor_estado == 6 and acao == 1: self._estado_7() elif self._valor_estado in [7, 8] and acao == 0: self._estado_6() elif self._valor_estado == 6 and acao == 2: self._estado_8() elif self._valor_estado in [6, 10, 11] and acao == 0: self._estado_9() elif self._valor_estado == 9 and acao == 1: self._estado_10() elif self._valor_estado == 9 and acao == 2: self._estado_11() elif self._valor_estado in [5, 9, 13] and acao == 0: self._estado_12() elif self._valor_estado == 12 and acao == 0: self._estado_13() elif self._valor_estado == 12 and acao == 1: self._estado_final() elif self._valor_estado == 9 and acao == 3: self._estado_6() elif self._valor_estado == 6 and acao == 3: self._estado_2() def _estado_1(self): self._reforco_imediato = self._estados_reforcos['estado_1'] self.reforco += self._reforco_imediato self._valor_estado = 1 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_1'] self._estado_acao = self._acao_textos['estado_1'] self._espaco_acoes = self._acao_dimensoes['estado_1'] def _estado_2(self): self._reforco_imediato = self._estados_reforcos['estado_2'] self.reforco += self._reforco_imediato self._valor_estado = 2 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_2'] self._estado_acao = self._acao_textos['estado_2'] self._espaco_acoes = self._acao_dimensoes['estado_2'] def _estado_3(self): self._reforco_imediato = self._estados_reforcos['estado_3'] self.reforco += self._reforco_imediato self._valor_estado = 3 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_3'] self._estado_acao = self._acao_textos['estado_3'] self._espaco_acoes = self._acao_dimensoes['estado_3'] <|reserved_special_token_0|> def _estado_5(self): self._reforco_imediato = self._estados_reforcos['estado_5'] self.reforco += self._reforco_imediato self._valor_estado = 5 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_5'] self._estado_acao = self._acao_textos['estado_5'] self._espaco_acoes = self._acao_dimensoes['estado_5'] def _estado_6(self): self._reforco_imediato = self._estados_reforcos['estado_6'] self.reforco += self._reforco_imediato self._valor_estado = 6 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_6'] self._estado_acao = self._acao_textos['estado_6'] self._espaco_acoes = self._acao_dimensoes['estado_6'] <|reserved_special_token_0|> <|reserved_special_token_0|> def _estado_9(self): self._reforco_imediato = self._estados_reforcos['estado_9'] self.reforco += self._reforco_imediato self._valor_estado = 9 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_9'] self._estado_acao = self._acao_textos['estado_9'] self._espaco_acoes = self._acao_dimensoes['estado_9'] def _estado_10(self): self._reforco_imediato = self._estados_reforcos['estado_10'] self.reforco += 
self._reforco_imediato self._valor_estado = 10 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_10'] self._estado_acao = self._acao_textos['estado_10'] self._espaco_acoes = self._acao_dimensoes['estado_10'] def _estado_11(self): self._reforco_imediato = self._estados_reforcos['estado_10'] self.reforco += self._reforco_imediato self._valor_estado = 11 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_11'] self._estado_acao = self._acao_textos['estado_10'] self._espaco_acoes = self._acao_dimensoes['estado_10'] def _estado_12(self): self._reforco_imediato = self._estados_reforcos['estado_12'] self.reforco += self._reforco_imediato self._valor_estado = 12 self._finalizado = self._estados_finalizado['estado_1'] self._estado_texto = self._estados_texto['estado_12'] self._estado_acao = self._acao_textos['estado_12'] self._espaco_acoes = self._acao_dimensoes['estado_12'] <|reserved_special_token_0|> def _estado_14(self): self._reforco_imediato = self._estados_reforcos['estado_14'] self.reforco -= self._reforco_imediato self._valor_estado = 14 self._finalizado = self._estados_finalizado['estado_14'] self._estado_texto = self._estados_texto['estado_14'] self._estado_acao = self._acao_textos['estado_14'] self._espaco_acoes = self._acao_dimensoes['estado_14'] <|reserved_special_token_0|> def _pacote_acoes(self): if self._valor_estado in [1, 4, 7, 8, 10, 11, 13]: return [0] elif self._valor_estado in [2]: return [0, 1, 2] elif self._valor_estado in [3, 5, 12]: return [0, 1] elif self._valor_estado in [9, 6]: return [0, 1, 2, 3] def checa_acao(self, acao): if acao in self._pacote_acoes(): return True else: return False def read_1(self): return (self._estado_texto, self._estado_acao, self._espaco_acoes, self._reforco_imediato, self._finalizado) def read(self): return self._estado_texto, self._estado_acao, self._espaco_acoes def imprime_acao(self, acoes): for cont, acao in enumerate(acoes): print('\t[{0}] {1}'.format(cont, acao)) def emulador(self, acao): if self._valor_estado == 2 and acao == 0: return self._estados_texto['estado_6'], self._acao_textos[ 'estado_6'], self._acao_dimensoes['estado_6' ], self._estados_reforcos['estado_6' ], self._estados_finalizado['estado_1'] elif self._valor_estado == 2 and acao == 1: return self._estados_texto['estado_9'], self._acao_textos[ 'estado_9'], self._acao_dimensoes['estado_9' ], self._estados_reforcos['estado_9' ], self._estados_finalizado['estado_1'] elif self._valor_estado in [1, 3, 4] and acao == 0: return self._estados_texto['estado_2'], self._acao_textos[ 'estado_2'], self._acao_dimensoes['estado_2' ], self._estados_reforcos['estado_2' ], self._estados_finalizado['estado_1'] elif self._valor_estado == 3 and acao == 1: return self._estados_texto['estado_5'], self._acao_textos[ 'estado_5'], self._acao_dimensoes['estado_5' ], self._estados_reforcos['estado_5' ], self._estados_finalizado['estado_1'] elif self._valor_estado == 2 and acao == 2: return self._estados_texto['estado_4'], self._acao_textos[ 'estado_4'], self._acao_dimensoes['estado_4' ], self._estados_reforcos['estado_4' ], self._estados_finalizado['estado_1'] elif self._valor_estado == 5 and acao == 1: return self._estados_texto['estado_9'], self._acao_textos[ 'estado_9'], self._acao_dimensoes['estado_9' ], self._estados_reforcos['estado_9' ], self._estados_finalizado['estado_1'] elif self._valor_estado == 6 and acao == 1: return self._estados_texto['estado_7'], self._acao_textos[ 
"""
\tSeja bem-vindo ao Admirável Mundo Novo!
\tO objetivo do jogo é dar suporte ao desenvolvimento de Agentes Inteligentes que utilizam Deep Reinforcement Learning
\tpara tarefas de Processamento de Linguagem Natural em língua portuguesa.
\tAutor: Gabriel Pontes (@ograndoptimista)
"""

import random

from source.emulador.textos import ESTADOS
from source.emulador.textos import ACOES
from source.emulador.textos import REFORCOS
from source.emulador.textos import FINALIZADO
from source.emulador.textos import DIMENSOES

print(__doc__)


class AdmiravelMundoNovo(object):
    def __init__(self):
        self.reforco = 0
        self._checa_estado = False
        self._estado_texto = None
        self._estado_acao = None
        self._finalizado = False
        self._espaco_acoes = None

        self._estados_texto = ESTADOS
        self._acao_textos = ACOES
        self._acao_dimensoes = DIMENSOES
        self._estados_reforcos = REFORCOS
        self._estados_finalizado = FINALIZADO

        self._valores_estados_iniciais = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]

        print("\tO objetivo do jogo é coletar a chave preciosa de ouro."
              + ".\n\tPara tal, você precisa vasculhar a Ilha da Fantasia.")
        print()

        self._escolha_estado_inicial()

    def _escolha_estado_inicial(self):
        escolha = random.choice(self._valores_estados_iniciais)
        if escolha == 1:
            self._estado_1()
        elif escolha == 2:
            self._estado_2()
        elif escolha == 3:
            self._estado_3()
        elif escolha == 4:
            self._estado_4()
        elif escolha == 5:
            self._estado_5()
        elif escolha == 6:
            self._estado_6()
        elif escolha == 7:
            self._estado_7()
        elif escolha == 8:
            self._estado_8()
        elif escolha == 9:
            self._estado_9()
        elif escolha == 10:
            self._estado_10()
        elif escolha == 11:
            self._estado_11()
        elif escolha == 12:
            self._estado_12()
        elif escolha == 13:
            self._estado_13()
        elif escolha == 14:
            self._estado_14()

    def transicao_estado(self, acao):
        if self._valor_estado == 2 and acao == 0:
            self._estado_6()
        elif self._valor_estado == 2 and acao == 1:
            self._estado_3()
        elif self._valor_estado in [1, 3, 4] and acao == 0:
            self._estado_2()
        elif self._valor_estado == 3 and acao == 1:
            self._estado_5()
        elif self._valor_estado == 2 and acao == 2:
            self._estado_4()
        elif self._valor_estado == 5 and acao == 1:
            self._estado_3()
        elif self._valor_estado == 6 and acao == 1:
            self._estado_7()
        elif self._valor_estado in [7, 8] and acao == 0:
            self._estado_6()
        elif self._valor_estado == 6 and acao == 2:
            self._estado_8()
        elif self._valor_estado in [6, 10, 11] and acao == 0:
            self._estado_9()
        elif self._valor_estado == 9 and acao == 1:
            self._estado_10()
        elif self._valor_estado == 9 and acao == 2:
            self._estado_11()
        elif self._valor_estado in [5, 9, 13] and acao == 0:
            self._estado_12()
        elif self._valor_estado == 12 and acao == 0:
            self._estado_13()
        elif self._valor_estado == 12 and acao == 1:
            self._estado_final()
        elif self._valor_estado == 9 and acao == 3:
            self._estado_6()
        elif self._valor_estado == 6 and acao == 3:
            self._estado_2()

    def _estado_1(self):
        self._reforco_imediato = self._estados_reforcos['estado_1']
        self.reforco += self._reforco_imediato
        self._valor_estado = 1
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_1']
        self._estado_acao = self._acao_textos['estado_1']
        self._espaco_acoes = self._acao_dimensoes['estado_1']

    def _estado_2(self):
        self._reforco_imediato = self._estados_reforcos['estado_2']
        self.reforco += self._reforco_imediato
        self._valor_estado = 2
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_2']
        self._estado_acao = self._acao_textos['estado_2']
        self._espaco_acoes = self._acao_dimensoes['estado_2']

    def _estado_3(self):
        self._reforco_imediato = self._estados_reforcos['estado_3']
        self.reforco += self._reforco_imediato
        self._valor_estado = 3
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_3']
        self._estado_acao = self._acao_textos['estado_3']
        self._espaco_acoes = self._acao_dimensoes['estado_3']

    def _estado_4(self):
        self._reforco_imediato = self._estados_reforcos['estado_4']
        self.reforco += self._reforco_imediato
        self._valor_estado = 4
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_4']
        self._estado_acao = self._acao_textos['estado_4']
        self._espaco_acoes = self._acao_dimensoes['estado_4']

    def _estado_5(self):
        self._reforco_imediato = self._estados_reforcos['estado_5']
        self.reforco += self._reforco_imediato
        self._valor_estado = 5
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_5']
        self._estado_acao = self._acao_textos['estado_5']
        self._espaco_acoes = self._acao_dimensoes['estado_5']

    def _estado_6(self):
        self._reforco_imediato = self._estados_reforcos['estado_6']
        self.reforco += self._reforco_imediato
        self._valor_estado = 6
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_6']
        self._estado_acao = self._acao_textos['estado_6']
        self._espaco_acoes = self._acao_dimensoes['estado_6']

    def _estado_7(self):
        self._reforco_imediato = self._estados_reforcos['estado_7']
        self.reforco += self._reforco_imediato
        self._valor_estado = 7
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_7']
        self._estado_acao = self._acao_textos['estado_7']
        self._espaco_acoes = self._acao_dimensoes['estado_7']

    def _estado_8(self):
        self._reforco_imediato = self._estados_reforcos['estado_7']
        self.reforco += self._reforco_imediato
        self._valor_estado = 8
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_8']
        self._estado_acao = self._acao_textos['estado_7']
        self._espaco_acoes = self._acao_dimensoes['estado_7']

    def _estado_9(self):
        self._reforco_imediato = self._estados_reforcos['estado_9']
        self.reforco += self._reforco_imediato
        self._valor_estado = 9
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_9']
        self._estado_acao = self._acao_textos['estado_9']
        self._espaco_acoes = self._acao_dimensoes['estado_9']

    def _estado_10(self):
        self._reforco_imediato = self._estados_reforcos['estado_10']
        self.reforco += self._reforco_imediato
        self._valor_estado = 10
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_10']
        self._estado_acao = self._acao_textos['estado_10']
        self._espaco_acoes = self._acao_dimensoes['estado_10']

    def _estado_11(self):
        self._reforco_imediato = self._estados_reforcos['estado_10']
        self.reforco += self._reforco_imediato
        self._valor_estado = 11
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_11']
        self._estado_acao = self._acao_textos['estado_10']
        self._espaco_acoes = self._acao_dimensoes['estado_10']

    def _estado_12(self):
        self._reforco_imediato = self._estados_reforcos['estado_12']
        self.reforco += self._reforco_imediato
        self._valor_estado = 12
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_12']
        self._estado_acao = self._acao_textos['estado_12']
        self._espaco_acoes = self._acao_dimensoes['estado_12']

    def _estado_13(self):
        self._reforco_imediato = self._estados_reforcos['estado_13']
        self.reforco -= self._reforco_imediato
        self._valor_estado = 13
        self._finalizado = self._estados_finalizado['estado_1']
        self._estado_texto = self._estados_texto['estado_13']
        self._estado_acao = self._acao_textos['estado_13']
        self._espaco_acoes = self._acao_dimensoes['estado_13']

    def _estado_14(self):
        self._reforco_imediato = self._estados_reforcos['estado_14']
        self.reforco -= self._reforco_imediato
        self._valor_estado = 14
        self._finalizado = self._estados_finalizado['estado_14']
        self._estado_texto = self._estados_texto['estado_14']
        self._estado_acao = self._acao_textos['estado_14']
        self._espaco_acoes = self._acao_dimensoes['estado_14']

    def _estado_final(self):
        self._reforco_imediato = self._estados_reforcos['estado_final']
        self.reforco += self._reforco_imediato
        self._finalizado = self._estados_finalizado['estado_final']
        self._estado_texto = self._estados_texto['estado_final']
        print("\tReforço acumulado de {0}".format(self.reforco))
        self._estado_acao = ""

    def _pacote_acoes(self):
        if self._valor_estado in [1, 4, 7, 8, 10, 11, 13]:
            return [0]
        elif self._valor_estado in [2]:
            return [0, 1, 2]
        elif self._valor_estado in [3, 5, 12]:
            return [0, 1]
        elif self._valor_estado in [9, 6]:
            return [0, 1, 2, 3]

    def checa_acao(self, acao):
        if acao in self._pacote_acoes():
            return True
        else:
            return False

    def read_1(self):
        return self._estado_texto, self._estado_acao, self._espaco_acoes, \
            self._reforco_imediato, self._finalizado

    def read(self):
        return self._estado_texto, self._estado_acao, self._espaco_acoes

    def imprime_acao(self, acoes):
        for cont, acao in enumerate(acoes):
            print("\t[{0}] {1}".format(cont, acao))

    def emulador(self, acao):
        if self._valor_estado == 2 and acao == 0:  # ok
            return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \
                self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 2 and acao == 1:  # ok
            return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \
                self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [1, 3, 4] and acao == 0:
            return self._estados_texto['estado_2'], self._acao_textos['estado_2'], self._acao_dimensoes['estado_2'], \
                self._estados_reforcos['estado_2'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 3 and acao == 1:
            return self._estados_texto['estado_5'], self._acao_textos['estado_5'], self._acao_dimensoes['estado_5'], \
                self._estados_reforcos['estado_5'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 2 and acao == 2:  # ok
            return self._estados_texto['estado_4'], self._acao_textos['estado_4'], self._acao_dimensoes['estado_4'], \
                self._estados_reforcos['estado_4'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 5 and acao == 1:
            return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \
                self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 6 and acao == 1:
            return self._estados_texto['estado_7'], self._acao_textos['estado_7'], self._acao_dimensoes['estado_7'], \
                self._estados_reforcos['estado_7'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [7, 8] and acao == 0:
            return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \
                self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 6 and acao == 2:
            return self._estados_texto['estado_8'], self._acao_textos['estado_7'], self._acao_dimensoes['estado_7'], \
                self._estados_reforcos['estado_7'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 9 and acao == 1:
            return self._estados_texto['estado_10'], self._acao_textos['estado_10'], self._acao_dimensoes['estado_10'], \
                self._estados_reforcos['estado_10'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [6, 10, 11] and acao == 0:
            return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \
                self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 9 and acao == 2:
            return self._estados_texto['estado_11'], self._acao_textos['estado_10'], self._acao_dimensoes['estado_10'], \
                self._estados_reforcos['estado_10'], self._estados_finalizado['estado_1']
        elif self._valor_estado in [5, 9, 13] and acao == 0:
            return self._estados_texto['estado_12'], self._acao_textos['estado_12'], self._acao_dimensoes['estado_12'], \
                self._estados_reforcos['estado_12'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 12 and acao == 0:
            return self._estados_texto['estado_13'], self._acao_textos['estado_13'], self._acao_dimensoes['estado_13'], \
                self._estados_reforcos['estado_13'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 12 and acao == 1:
            return self._estados_texto['estado_final'], self._acao_textos['estado_final'], \
                self._acao_dimensoes['estado_final'], self._estados_reforcos['estado_final'], \
                self._estados_finalizado['estado_final']
        elif self._valor_estado == 9 and acao == 3:
            return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \
                self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']
        elif self._valor_estado == 6 and acao == 3:
            return self._estados_texto['estado_2'], self._acao_textos['estado_2'], self._acao_dimensoes['estado_2'], \
                self._estados_reforcos['estado_2'], self._estados_finalizado['estado_1']
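

# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the original source):
# a random-policy episode loop driven only by the public methods defined above
# (read_1, imprime_acao, checa_acao, transicao_estado). It assumes that the
# FINALIZADO table maps states to booleans and that ACOES maps each state to a
# list of action descriptions; both are readings of how the class uses them,
# not facts confirmed by the original file.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    jogo = AdmiravelMundoNovo()  # sorteia um estado inicial e imprime o objetivo

    texto, acoes, espaco, reforco, finalizado = jogo.read_1()
    while not finalizado:
        jogo.imprime_acao(acoes)           # lists the numbered actions of the current state
        acao = random.randint(0, 3)        # action indices are always drawn from {0, 1, 2, 3}
        while not jogo.checa_acao(acao):   # resample until the action is valid in this state
            acao = random.randint(0, 3)
        jogo.transicao_estado(acao)        # apply the action; the reward accumulates internally
        texto, acoes, espaco, reforco, finalizado = jogo.read_1()

    print("\tReforço total do episódio: {0}".format(jogo.reforco))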
self._estados_texto['estado_2'], self._acao_textos[\n 'estado_2'], self._acao_dimensoes['estado_2'\n ], self._estados_reforcos['estado_2'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 3 and acao == 1:\n return self._estados_texto['estado_5'], self._acao_textos[\n 'estado_5'], self._acao_dimensoes['estado_5'\n ], self._estados_reforcos['estado_5'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 2:\n return self._estados_texto['estado_4'], self._acao_textos[\n 'estado_4'], self._acao_dimensoes['estado_4'\n ], self._estados_reforcos['estado_4'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 5 and acao == 1:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 1:\n return self._estados_texto['estado_7'], self._acao_textos[\n 'estado_7'], self._acao_dimensoes['estado_7'\n ], self._estados_reforcos['estado_7'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [7, 8] and acao == 0:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 2:\n return self._estados_texto['estado_8'], self._acao_textos[\n 'estado_7'], self._acao_dimensoes['estado_7'\n ], self._estados_reforcos['estado_7'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 1:\n return self._estados_texto['estado_10'], self._acao_textos[\n 'estado_10'], self._acao_dimensoes['estado_10'\n ], self._estados_reforcos['estado_10'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 2:\n return self._estados_texto['estado_11'], self._acao_textos[\n 'estado_10'], self._acao_dimensoes['estado_10'\n ], self._estados_reforcos['estado_10'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n return self._estados_texto['estado_12'], self._acao_textos[\n 'estado_12'], self._acao_dimensoes['estado_12'\n ], self._estados_reforcos['estado_12'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 0:\n return self._estados_texto['estado_13'], self._acao_textos[\n 'estado_13'], self._acao_dimensoes['estado_13'\n ], self._estados_reforcos['estado_13'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 1:\n return self._estados_texto['estado_final'], self._acao_textos[\n 'estado_final'], self._acao_dimensoes['estado_final'\n ], self._estados_reforcos['estado_final'\n ], self._estados_finalizado['estado_final']\n elif self._valor_estado == 9 and acao == 3:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 3:\n return self._estados_texto['estado_2'], self._acao_textos[\n 'estado_2'], self._acao_dimensoes['estado_2'\n ], self._estados_reforcos['estado_2'\n ], self._estados_finalizado['estado_1']\n", "step-3": "<mask 
token>\n\n\nclass AdmiravelMundoNovo(object):\n\n def __init__(self):\n self.reforco = 0\n self._checa_estado = False\n self._estado_texto = None\n self._estado_acao = None\n self._finalizado = False\n self._espaco_acoes = None\n self._estados_texto = ESTADOS\n self._acao_textos = ACOES\n self._acao_dimensoes = DIMENSOES\n self._estados_reforcos = REFORCOS\n self._estados_finalizado = FINALIZADO\n self._valores_estados_iniciais = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12, 13]\n print('\\tO objetivo do jogo é coletar a chave preciosa de ouro.' +\n \"\"\".\n\tPara tal, você precisa vasculhar a Ilha da Fantasia.\"\"\")\n print()\n self._escolha_estado_inicial()\n <mask token>\n\n def transicao_estado(self, acao):\n if self._valor_estado == 2 and acao == 0:\n self._estado_6()\n elif self._valor_estado == 2 and acao == 1:\n self._estado_3()\n elif self._valor_estado in [1, 3, 4] and acao == 0:\n self._estado_2()\n elif self._valor_estado == 3 and acao == 1:\n self._estado_5()\n elif self._valor_estado == 2 and acao == 2:\n self._estado_4()\n elif self._valor_estado == 5 and acao == 1:\n self._estado_3()\n elif self._valor_estado == 6 and acao == 1:\n self._estado_7()\n elif self._valor_estado in [7, 8] and acao == 0:\n self._estado_6()\n elif self._valor_estado == 6 and acao == 2:\n self._estado_8()\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n self._estado_9()\n elif self._valor_estado == 9 and acao == 1:\n self._estado_10()\n elif self._valor_estado == 9 and acao == 2:\n self._estado_11()\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n self._estado_12()\n elif self._valor_estado == 12 and acao == 0:\n self._estado_13()\n elif self._valor_estado == 12 and acao == 1:\n self._estado_final()\n elif self._valor_estado == 9 and acao == 3:\n self._estado_6()\n elif self._valor_estado == 6 and acao == 3:\n self._estado_2()\n\n def _estado_1(self):\n self._reforco_imediato = self._estados_reforcos['estado_1']\n self.reforco += self._reforco_imediato\n self._valor_estado = 1\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_1']\n self._estado_acao = self._acao_textos['estado_1']\n self._espaco_acoes = self._acao_dimensoes['estado_1']\n\n def _estado_2(self):\n self._reforco_imediato = self._estados_reforcos['estado_2']\n self.reforco += self._reforco_imediato\n self._valor_estado = 2\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_2']\n self._estado_acao = self._acao_textos['estado_2']\n self._espaco_acoes = self._acao_dimensoes['estado_2']\n\n def _estado_3(self):\n self._reforco_imediato = self._estados_reforcos['estado_3']\n self.reforco += self._reforco_imediato\n self._valor_estado = 3\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_3']\n self._estado_acao = self._acao_textos['estado_3']\n self._espaco_acoes = self._acao_dimensoes['estado_3']\n\n def _estado_4(self):\n self._reforco_imediato = self._estados_reforcos['estado_4']\n self.reforco += self._reforco_imediato\n self._valor_estado = 4\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_4']\n self._estado_acao = self._acao_textos['estado_4']\n self._espaco_acoes = self._acao_dimensoes['estado_4']\n\n def _estado_5(self):\n self._reforco_imediato = self._estados_reforcos['estado_5']\n self.reforco += self._reforco_imediato\n self._valor_estado = 5\n self._finalizado = 
self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_5']\n self._estado_acao = self._acao_textos['estado_5']\n self._espaco_acoes = self._acao_dimensoes['estado_5']\n\n def _estado_6(self):\n self._reforco_imediato = self._estados_reforcos['estado_6']\n self.reforco += self._reforco_imediato\n self._valor_estado = 6\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_6']\n self._estado_acao = self._acao_textos['estado_6']\n self._espaco_acoes = self._acao_dimensoes['estado_6']\n\n def _estado_7(self):\n self._reforco_imediato = self._estados_reforcos['estado_7']\n self.reforco += self._reforco_imediato\n self._valor_estado = 7\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_7']\n self._estado_acao = self._acao_textos['estado_7']\n self._espaco_acoes = self._acao_dimensoes['estado_7']\n\n def _estado_8(self):\n self._reforco_imediato = self._estados_reforcos['estado_7']\n self.reforco += self._reforco_imediato\n self._valor_estado = 8\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_8']\n self._estado_acao = self._acao_textos['estado_7']\n self._espaco_acoes = self._acao_dimensoes['estado_7']\n\n def _estado_9(self):\n self._reforco_imediato = self._estados_reforcos['estado_9']\n self.reforco += self._reforco_imediato\n self._valor_estado = 9\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_9']\n self._estado_acao = self._acao_textos['estado_9']\n self._espaco_acoes = self._acao_dimensoes['estado_9']\n\n def _estado_10(self):\n self._reforco_imediato = self._estados_reforcos['estado_10']\n self.reforco += self._reforco_imediato\n self._valor_estado = 10\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_10']\n self._estado_acao = self._acao_textos['estado_10']\n self._espaco_acoes = self._acao_dimensoes['estado_10']\n\n def _estado_11(self):\n self._reforco_imediato = self._estados_reforcos['estado_10']\n self.reforco += self._reforco_imediato\n self._valor_estado = 11\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_11']\n self._estado_acao = self._acao_textos['estado_10']\n self._espaco_acoes = self._acao_dimensoes['estado_10']\n\n def _estado_12(self):\n self._reforco_imediato = self._estados_reforcos['estado_12']\n self.reforco += self._reforco_imediato\n self._valor_estado = 12\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_12']\n self._estado_acao = self._acao_textos['estado_12']\n self._espaco_acoes = self._acao_dimensoes['estado_12']\n <mask token>\n\n def _estado_14(self):\n self._reforco_imediato = self._estados_reforcos['estado_14']\n self.reforco -= self._reforco_imediato\n self._valor_estado = 14\n self._finalizado = self._estados_finalizado['estado_14']\n self._estado_texto = self._estados_texto['estado_14']\n self._estado_acao = self._acao_textos['estado_14']\n self._espaco_acoes = self._acao_dimensoes['estado_14']\n\n def _estado_final(self):\n self._reforco_imediato = self._estados_reforcos['estado_final']\n self.reforco += self._reforco_imediato\n self._finalizado = self._estados_finalizado['estado_final']\n self._estado_texto = self._estados_texto['estado_final']\n print('\\tReforço acumulado de {0}'.format(self.reforco))\n 
self._estado_acao = ''\n\n def _pacote_acoes(self):\n if self._valor_estado in [1, 4, 7, 8, 10, 11, 13]:\n return [0]\n elif self._valor_estado in [2]:\n return [0, 1, 2]\n elif self._valor_estado in [3, 5, 12]:\n return [0, 1]\n elif self._valor_estado in [9, 6]:\n return [0, 1, 2, 3]\n\n def checa_acao(self, acao):\n if acao in self._pacote_acoes():\n return True\n else:\n return False\n\n def read_1(self):\n return (self._estado_texto, self._estado_acao, self._espaco_acoes,\n self._reforco_imediato, self._finalizado)\n\n def read(self):\n return self._estado_texto, self._estado_acao, self._espaco_acoes\n\n def imprime_acao(self, acoes):\n for cont, acao in enumerate(acoes):\n print('\\t[{0}] {1}'.format(cont, acao))\n\n def emulador(self, acao):\n if self._valor_estado == 2 and acao == 0:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 1:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [1, 3, 4] and acao == 0:\n return self._estados_texto['estado_2'], self._acao_textos[\n 'estado_2'], self._acao_dimensoes['estado_2'\n ], self._estados_reforcos['estado_2'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 3 and acao == 1:\n return self._estados_texto['estado_5'], self._acao_textos[\n 'estado_5'], self._acao_dimensoes['estado_5'\n ], self._estados_reforcos['estado_5'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 2:\n return self._estados_texto['estado_4'], self._acao_textos[\n 'estado_4'], self._acao_dimensoes['estado_4'\n ], self._estados_reforcos['estado_4'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 5 and acao == 1:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 1:\n return self._estados_texto['estado_7'], self._acao_textos[\n 'estado_7'], self._acao_dimensoes['estado_7'\n ], self._estados_reforcos['estado_7'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [7, 8] and acao == 0:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 2:\n return self._estados_texto['estado_8'], self._acao_textos[\n 'estado_7'], self._acao_dimensoes['estado_7'\n ], self._estados_reforcos['estado_7'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 1:\n return self._estados_texto['estado_10'], self._acao_textos[\n 'estado_10'], self._acao_dimensoes['estado_10'\n ], self._estados_reforcos['estado_10'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 2:\n return self._estados_texto['estado_11'], self._acao_textos[\n 'estado_10'], self._acao_dimensoes['estado_10'\n ], 
self._estados_reforcos['estado_10'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n return self._estados_texto['estado_12'], self._acao_textos[\n 'estado_12'], self._acao_dimensoes['estado_12'\n ], self._estados_reforcos['estado_12'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 0:\n return self._estados_texto['estado_13'], self._acao_textos[\n 'estado_13'], self._acao_dimensoes['estado_13'\n ], self._estados_reforcos['estado_13'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 1:\n return self._estados_texto['estado_final'], self._acao_textos[\n 'estado_final'], self._acao_dimensoes['estado_final'\n ], self._estados_reforcos['estado_final'\n ], self._estados_finalizado['estado_final']\n elif self._valor_estado == 9 and acao == 3:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 3:\n return self._estados_texto['estado_2'], self._acao_textos[\n 'estado_2'], self._acao_dimensoes['estado_2'\n ], self._estados_reforcos['estado_2'\n ], self._estados_finalizado['estado_1']\n", "step-4": "<mask token>\n\n\nclass AdmiravelMundoNovo(object):\n\n def __init__(self):\n self.reforco = 0\n self._checa_estado = False\n self._estado_texto = None\n self._estado_acao = None\n self._finalizado = False\n self._espaco_acoes = None\n self._estados_texto = ESTADOS\n self._acao_textos = ACOES\n self._acao_dimensoes = DIMENSOES\n self._estados_reforcos = REFORCOS\n self._estados_finalizado = FINALIZADO\n self._valores_estados_iniciais = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12, 13]\n print('\\tO objetivo do jogo é coletar a chave preciosa de ouro.' 
+\n \"\"\".\n\tPara tal, você precisa vasculhar a Ilha da Fantasia.\"\"\")\n print()\n self._escolha_estado_inicial()\n\n def _escolha_estado_inicial(self):\n escolha = random.choice(self._valores_estados_iniciais)\n if escolha == 1:\n self._estado_1()\n elif escolha == 2:\n self._estado_2()\n elif escolha == 3:\n self._estado_3()\n elif escolha == 4:\n self._estado_4()\n elif escolha == 5:\n self._estado_5()\n elif escolha == 6:\n self._estado_6()\n elif escolha == 7:\n self._estado_7()\n elif escolha == 8:\n self._estado_8()\n elif escolha == 9:\n self._estado_9()\n elif escolha == 10:\n self._estado_10()\n elif escolha == 11:\n self._estado_11()\n elif escolha == 12:\n self._estado_12()\n elif escolha == 13:\n self._estado_13()\n elif escolha == 14:\n self._estado_14()\n\n def transicao_estado(self, acao):\n if self._valor_estado == 2 and acao == 0:\n self._estado_6()\n elif self._valor_estado == 2 and acao == 1:\n self._estado_3()\n elif self._valor_estado in [1, 3, 4] and acao == 0:\n self._estado_2()\n elif self._valor_estado == 3 and acao == 1:\n self._estado_5()\n elif self._valor_estado == 2 and acao == 2:\n self._estado_4()\n elif self._valor_estado == 5 and acao == 1:\n self._estado_3()\n elif self._valor_estado == 6 and acao == 1:\n self._estado_7()\n elif self._valor_estado in [7, 8] and acao == 0:\n self._estado_6()\n elif self._valor_estado == 6 and acao == 2:\n self._estado_8()\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n self._estado_9()\n elif self._valor_estado == 9 and acao == 1:\n self._estado_10()\n elif self._valor_estado == 9 and acao == 2:\n self._estado_11()\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n self._estado_12()\n elif self._valor_estado == 12 and acao == 0:\n self._estado_13()\n elif self._valor_estado == 12 and acao == 1:\n self._estado_final()\n elif self._valor_estado == 9 and acao == 3:\n self._estado_6()\n elif self._valor_estado == 6 and acao == 3:\n self._estado_2()\n\n def _estado_1(self):\n self._reforco_imediato = self._estados_reforcos['estado_1']\n self.reforco += self._reforco_imediato\n self._valor_estado = 1\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_1']\n self._estado_acao = self._acao_textos['estado_1']\n self._espaco_acoes = self._acao_dimensoes['estado_1']\n\n def _estado_2(self):\n self._reforco_imediato = self._estados_reforcos['estado_2']\n self.reforco += self._reforco_imediato\n self._valor_estado = 2\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_2']\n self._estado_acao = self._acao_textos['estado_2']\n self._espaco_acoes = self._acao_dimensoes['estado_2']\n\n def _estado_3(self):\n self._reforco_imediato = self._estados_reforcos['estado_3']\n self.reforco += self._reforco_imediato\n self._valor_estado = 3\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_3']\n self._estado_acao = self._acao_textos['estado_3']\n self._espaco_acoes = self._acao_dimensoes['estado_3']\n\n def _estado_4(self):\n self._reforco_imediato = self._estados_reforcos['estado_4']\n self.reforco += self._reforco_imediato\n self._valor_estado = 4\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_4']\n self._estado_acao = self._acao_textos['estado_4']\n self._espaco_acoes = self._acao_dimensoes['estado_4']\n\n def _estado_5(self):\n self._reforco_imediato = self._estados_reforcos['estado_5']\n 
self.reforco += self._reforco_imediato\n self._valor_estado = 5\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_5']\n self._estado_acao = self._acao_textos['estado_5']\n self._espaco_acoes = self._acao_dimensoes['estado_5']\n\n def _estado_6(self):\n self._reforco_imediato = self._estados_reforcos['estado_6']\n self.reforco += self._reforco_imediato\n self._valor_estado = 6\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_6']\n self._estado_acao = self._acao_textos['estado_6']\n self._espaco_acoes = self._acao_dimensoes['estado_6']\n\n def _estado_7(self):\n self._reforco_imediato = self._estados_reforcos['estado_7']\n self.reforco += self._reforco_imediato\n self._valor_estado = 7\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_7']\n self._estado_acao = self._acao_textos['estado_7']\n self._espaco_acoes = self._acao_dimensoes['estado_7']\n\n def _estado_8(self):\n self._reforco_imediato = self._estados_reforcos['estado_7']\n self.reforco += self._reforco_imediato\n self._valor_estado = 8\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_8']\n self._estado_acao = self._acao_textos['estado_7']\n self._espaco_acoes = self._acao_dimensoes['estado_7']\n\n def _estado_9(self):\n self._reforco_imediato = self._estados_reforcos['estado_9']\n self.reforco += self._reforco_imediato\n self._valor_estado = 9\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_9']\n self._estado_acao = self._acao_textos['estado_9']\n self._espaco_acoes = self._acao_dimensoes['estado_9']\n\n def _estado_10(self):\n self._reforco_imediato = self._estados_reforcos['estado_10']\n self.reforco += self._reforco_imediato\n self._valor_estado = 10\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_10']\n self._estado_acao = self._acao_textos['estado_10']\n self._espaco_acoes = self._acao_dimensoes['estado_10']\n\n def _estado_11(self):\n self._reforco_imediato = self._estados_reforcos['estado_10']\n self.reforco += self._reforco_imediato\n self._valor_estado = 11\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_11']\n self._estado_acao = self._acao_textos['estado_10']\n self._espaco_acoes = self._acao_dimensoes['estado_10']\n\n def _estado_12(self):\n self._reforco_imediato = self._estados_reforcos['estado_12']\n self.reforco += self._reforco_imediato\n self._valor_estado = 12\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_12']\n self._estado_acao = self._acao_textos['estado_12']\n self._espaco_acoes = self._acao_dimensoes['estado_12']\n\n def _estado_13(self):\n self._reforco_imediato = self._estados_reforcos['estado_13']\n self.reforco -= self._reforco_imediato\n self._valor_estado = 13\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_13']\n self._estado_acao = self._acao_textos['estado_13']\n self._espaco_acoes = self._acao_dimensoes['estado_13']\n\n def _estado_14(self):\n self._reforco_imediato = self._estados_reforcos['estado_14']\n self.reforco -= self._reforco_imediato\n self._valor_estado = 14\n self._finalizado = self._estados_finalizado['estado_14']\n self._estado_texto = 
self._estados_texto['estado_14']\n self._estado_acao = self._acao_textos['estado_14']\n self._espaco_acoes = self._acao_dimensoes['estado_14']\n\n def _estado_final(self):\n self._reforco_imediato = self._estados_reforcos['estado_final']\n self.reforco += self._reforco_imediato\n self._finalizado = self._estados_finalizado['estado_final']\n self._estado_texto = self._estados_texto['estado_final']\n print('\\tReforço acumulado de {0}'.format(self.reforco))\n self._estado_acao = ''\n\n def _pacote_acoes(self):\n if self._valor_estado in [1, 4, 7, 8, 10, 11, 13]:\n return [0]\n elif self._valor_estado in [2]:\n return [0, 1, 2]\n elif self._valor_estado in [3, 5, 12]:\n return [0, 1]\n elif self._valor_estado in [9, 6]:\n return [0, 1, 2, 3]\n\n def checa_acao(self, acao):\n if acao in self._pacote_acoes():\n return True\n else:\n return False\n\n def read_1(self):\n return (self._estado_texto, self._estado_acao, self._espaco_acoes,\n self._reforco_imediato, self._finalizado)\n\n def read(self):\n return self._estado_texto, self._estado_acao, self._espaco_acoes\n\n def imprime_acao(self, acoes):\n for cont, acao in enumerate(acoes):\n print('\\t[{0}] {1}'.format(cont, acao))\n\n def emulador(self, acao):\n if self._valor_estado == 2 and acao == 0:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 1:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [1, 3, 4] and acao == 0:\n return self._estados_texto['estado_2'], self._acao_textos[\n 'estado_2'], self._acao_dimensoes['estado_2'\n ], self._estados_reforcos['estado_2'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 3 and acao == 1:\n return self._estados_texto['estado_5'], self._acao_textos[\n 'estado_5'], self._acao_dimensoes['estado_5'\n ], self._estados_reforcos['estado_5'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 2:\n return self._estados_texto['estado_4'], self._acao_textos[\n 'estado_4'], self._acao_dimensoes['estado_4'\n ], self._estados_reforcos['estado_4'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 5 and acao == 1:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 1:\n return self._estados_texto['estado_7'], self._acao_textos[\n 'estado_7'], self._acao_dimensoes['estado_7'\n ], self._estados_reforcos['estado_7'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [7, 8] and acao == 0:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 2:\n return self._estados_texto['estado_8'], self._acao_textos[\n 'estado_7'], self._acao_dimensoes['estado_7'\n ], self._estados_reforcos['estado_7'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 1:\n return self._estados_texto['estado_10'], self._acao_textos[\n 'estado_10'], self._acao_dimensoes['estado_10'\n ], self._estados_reforcos['estado_10'\n ], 
self._estados_finalizado['estado_1']\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n return self._estados_texto['estado_9'], self._acao_textos[\n 'estado_9'], self._acao_dimensoes['estado_9'\n ], self._estados_reforcos['estado_9'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 2:\n return self._estados_texto['estado_11'], self._acao_textos[\n 'estado_10'], self._acao_dimensoes['estado_10'\n ], self._estados_reforcos['estado_10'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n return self._estados_texto['estado_12'], self._acao_textos[\n 'estado_12'], self._acao_dimensoes['estado_12'\n ], self._estados_reforcos['estado_12'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 0:\n return self._estados_texto['estado_13'], self._acao_textos[\n 'estado_13'], self._acao_dimensoes['estado_13'\n ], self._estados_reforcos['estado_13'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 1:\n return self._estados_texto['estado_final'], self._acao_textos[\n 'estado_final'], self._acao_dimensoes['estado_final'\n ], self._estados_reforcos['estado_final'\n ], self._estados_finalizado['estado_final']\n elif self._valor_estado == 9 and acao == 3:\n return self._estados_texto['estado_6'], self._acao_textos[\n 'estado_6'], self._acao_dimensoes['estado_6'\n ], self._estados_reforcos['estado_6'\n ], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 3:\n return self._estados_texto['estado_2'], self._acao_textos[\n 'estado_2'], self._acao_dimensoes['estado_2'\n ], self._estados_reforcos['estado_2'\n ], self._estados_finalizado['estado_1']\n", "step-5": "\"\"\"\n \\tSeja bem-vindo ao Admirável Mundo Novo!\n \\tO objetivo do jogo é dar suporte ao desenvolvimento de Agentes Inteligentes que utilizam Deep Reinforcement Learning\n \\tpara tarefas de Processamento de Linguagem Natural em língua portuguesa.\n \\tAutor: Gabriel Pontes (@ograndoptimist)\n\"\"\"\n\nimport random\n\nfrom source.emulador.textos import ESTADOS\nfrom source.emulador.textos import ACOES\nfrom source.emulador.textos import REFORCOS\nfrom source.emulador.textos import FINALIZADO\nfrom source.emulador.textos import DIMENSOES\n\n\nprint(__doc__)\n\n\nclass AdmiravelMundoNovo(object):\n def __init__(self):\n self.reforco = 0\n self._checa_estado = False\n self._estado_texto = None\n self._estado_acao = None\n self._finalizado = False\n self._espaco_acoes = None\n self._estados_texto = ESTADOS\n self._acao_textos = ACOES\n self._acao_dimensoes = DIMENSOES\n self._estados_reforcos = REFORCOS\n self._estados_finalizado = FINALIZADO\n self._valores_estados_iniciais = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\n print(\"\\tO objetivo do jogo é coletar a chave preciosa de ouro.\" +\n \".\\n\\tPara tal, você precisa vasculhar a Ilha da Fantasia.\")\n print()\n\n self._escolha_estado_inicial()\n\n def _escolha_estado_inicial(self):\n escolha = random.choice(self._valores_estados_iniciais)\n\n if escolha == 1:\n self._estado_1()\n elif escolha == 2:\n self._estado_2()\n elif escolha == 3:\n self._estado_3()\n elif escolha == 4:\n self._estado_4()\n elif escolha == 5:\n self._estado_5()\n elif escolha == 6:\n self._estado_6()\n elif escolha == 7:\n self._estado_7()\n elif escolha == 8:\n self._estado_8()\n elif escolha == 9:\n self._estado_9()\n elif escolha == 10:\n self._estado_10()\n elif escolha == 11:\n self._estado_11()\n elif escolha == 12:\n 
self._estado_12()\n elif escolha == 13:\n self._estado_13()\n elif escolha == 14:\n self._estado_14()\n\n def transicao_estado(self, acao):\n if self._valor_estado == 2 and acao == 0:\n self._estado_6()\n elif self._valor_estado == 2 and acao == 1:\n self._estado_3()\n elif self._valor_estado in [1, 3, 4] and acao == 0:\n self._estado_2()\n elif self._valor_estado == 3 and acao == 1:\n self._estado_5()\n elif self._valor_estado == 2 and acao == 2:\n self._estado_4()\n elif self._valor_estado == 5 and acao == 1:\n self._estado_3()\n elif self._valor_estado == 6 and acao == 1:\n self._estado_7()\n elif self._valor_estado in [7, 8] and acao == 0:\n self._estado_6()\n elif self._valor_estado == 6 and acao == 2:\n self._estado_8()\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n self._estado_9()\n elif self._valor_estado == 9 and acao == 1:\n self._estado_10()\n elif self._valor_estado == 9 and acao == 2:\n self._estado_11()\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n self._estado_12()\n elif self._valor_estado == 12 and acao == 0:\n self._estado_13()\n elif self._valor_estado == 12 and acao == 1:\n self._estado_final()\n elif self._valor_estado == 9 and acao == 3:\n self._estado_6()\n elif self._valor_estado == 6 and acao == 3:\n self._estado_2()\n\n def _estado_1(self):\n self._reforco_imediato = self._estados_reforcos['estado_1']\n self.reforco += self._reforco_imediato\n self._valor_estado = 1\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_1']\n self._estado_acao = self._acao_textos['estado_1']\n self._espaco_acoes = self._acao_dimensoes['estado_1']\n\n def _estado_2(self):\n self._reforco_imediato = self._estados_reforcos['estado_2']\n self.reforco += self._reforco_imediato\n self._valor_estado = 2\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_2']\n self._estado_acao = self._acao_textos['estado_2']\n self._espaco_acoes = self._acao_dimensoes['estado_2']\n\n def _estado_3(self):\n self._reforco_imediato = self._estados_reforcos['estado_3']\n self.reforco += self._reforco_imediato\n self._valor_estado = 3\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_3']\n self._estado_acao = self._acao_textos['estado_3']\n self._espaco_acoes = self._acao_dimensoes['estado_3']\n\n def _estado_4(self):\n self._reforco_imediato = self._estados_reforcos['estado_4']\n self.reforco += self._reforco_imediato\n self._valor_estado = 4\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_4']\n self._estado_acao = self._acao_textos['estado_4']\n self._espaco_acoes = self._acao_dimensoes['estado_4']\n\n def _estado_5(self):\n self._reforco_imediato = self._estados_reforcos['estado_5']\n self.reforco += self._reforco_imediato\n self._valor_estado = 5\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_5']\n self._estado_acao = self._acao_textos['estado_5']\n self._espaco_acoes = self._acao_dimensoes['estado_5']\n\n def _estado_6(self):\n self._reforco_imediato = self._estados_reforcos['estado_6']\n self.reforco += self._reforco_imediato\n self._valor_estado = 6\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_6']\n self._estado_acao = self._acao_textos['estado_6']\n self._espaco_acoes = self._acao_dimensoes['estado_6']\n\n def 
_estado_7(self):\n self._reforco_imediato = self._estados_reforcos['estado_7']\n self.reforco += self._reforco_imediato\n self._valor_estado = 7\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_7']\n self._estado_acao = self._acao_textos['estado_7']\n self._espaco_acoes = self._acao_dimensoes['estado_7']\n\n def _estado_8(self):\n self._reforco_imediato = self._estados_reforcos['estado_7']\n self.reforco += self._reforco_imediato\n self._valor_estado = 8\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_8']\n self._estado_acao = self._acao_textos['estado_7']\n self._espaco_acoes = self._acao_dimensoes['estado_7']\n\n def _estado_9(self):\n self._reforco_imediato = self._estados_reforcos['estado_9']\n self.reforco += self._reforco_imediato\n self._valor_estado = 9\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_9']\n self._estado_acao = self._acao_textos['estado_9']\n self._espaco_acoes = self._acao_dimensoes['estado_9']\n\n def _estado_10(self):\n self._reforco_imediato = self._estados_reforcos['estado_10']\n self.reforco += self._reforco_imediato\n self._valor_estado = 10\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_10']\n self._estado_acao = self._acao_textos['estado_10']\n self._espaco_acoes = self._acao_dimensoes['estado_10']\n\n def _estado_11(self):\n self._reforco_imediato = self._estados_reforcos['estado_10']\n self.reforco += self._reforco_imediato\n self._valor_estado = 11\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_11']\n self._estado_acao = self._acao_textos['estado_10']\n self._espaco_acoes = self._acao_dimensoes['estado_10']\n\n def _estado_12(self):\n self._reforco_imediato = self._estados_reforcos['estado_12']\n self.reforco += self._reforco_imediato\n self._valor_estado = 12\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_12']\n self._estado_acao = self._acao_textos['estado_12']\n self._espaco_acoes = self._acao_dimensoes['estado_12']\n\n def _estado_13(self):\n self._reforco_imediato = self._estados_reforcos['estado_13']\n self.reforco -= self._reforco_imediato\n self._valor_estado = 13\n self._finalizado = self._estados_finalizado['estado_1']\n self._estado_texto = self._estados_texto['estado_13']\n self._estado_acao = self._acao_textos['estado_13']\n self._espaco_acoes = self._acao_dimensoes['estado_13']\n\n def _estado_14(self):\n self._reforco_imediato = self._estados_reforcos['estado_14']\n self.reforco -= self._reforco_imediato\n self._valor_estado = 14\n self._finalizado = self._estados_finalizado['estado_14']\n self._estado_texto = self._estados_texto['estado_14']\n self._estado_acao = self._acao_textos['estado_14']\n self._espaco_acoes = self._acao_dimensoes['estado_14']\n\n def _estado_final(self):\n self._reforco_imediato = self._estados_reforcos['estado_final']\n self.reforco += self._reforco_imediato\n self._finalizado = self._estados_finalizado['estado_final']\n self._estado_texto = self._estados_texto['estado_final']\n print(\"\\tReforço acumulado de {0}\".format(self.reforco))\n self._estado_acao = \"\"\n\n def _pacote_acoes(self):\n if self._valor_estado in [1, 4, 7, 8, 10, 11, 13]:\n return [0]\n elif self._valor_estado in [2]:\n return [0, 1, 2]\n elif self._valor_estado in [3, 5, 12]:\n 
return [0, 1]\n elif self._valor_estado in [9, 6]:\n return [0, 1, 2, 3]\n\n def checa_acao(self, acao):\n if acao in self._pacote_acoes():\n return True\n else:\n return False\n\n def read_1(self):\n return self._estado_texto, self._estado_acao, self._espaco_acoes, self._reforco_imediato, self._finalizado\n\n def read(self):\n return self._estado_texto, self._estado_acao, self._espaco_acoes\n\n def imprime_acao(self, acoes):\n for cont, acao in enumerate(acoes):\n print(\"\\t[{0}] {1}\".format(cont, acao))\n\n def emulador(self, acao):\n if self._valor_estado == 2 and acao == 0: # ok\n return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \\\n self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 1: # ok\n return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \\\n self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']\n elif self._valor_estado in [1, 3, 4] and acao == 0:\n return self._estados_texto['estado_2'], self._acao_textos['estado_2'], self._acao_dimensoes['estado_2'], \\\n self._estados_reforcos['estado_2'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 3 and acao == 1:\n return self._estados_texto['estado_5'], self._acao_textos['estado_5'], self._acao_dimensoes['estado_5'], \\\n self._estados_reforcos['estado_5'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 2 and acao == 2: # ok\n return self._estados_texto['estado_4'], self._acao_textos['estado_4'], self._acao_dimensoes['estado_4'], \\\n self._estados_reforcos['estado_4'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 5 and acao == 1:\n return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \\\n self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 1:\n return self._estados_texto['estado_7'], self._acao_textos['estado_7'], self._acao_dimensoes['estado_7'], \\\n self._estados_reforcos['estado_7'], self._estados_finalizado['estado_1']\n elif self._valor_estado in [7, 8] and acao == 0:\n return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \\\n self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 2:\n return self._estados_texto['estado_8'], self._acao_textos['estado_7'], self._acao_dimensoes['estado_7'], \\\n self._estados_reforcos['estado_7'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 1:\n return self._estados_texto['estado_10'], self._acao_textos['estado_10'], self._acao_dimensoes['estado_10'], \\\n self._estados_reforcos['estado_10'], self._estados_finalizado['estado_1']\n elif self._valor_estado in [6, 10, 11] and acao == 0:\n return self._estados_texto['estado_9'], self._acao_textos['estado_9'], self._acao_dimensoes['estado_9'], \\\n self._estados_reforcos['estado_9'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 9 and acao == 2:\n return self._estados_texto['estado_11'], self._acao_textos['estado_10'], self._acao_dimensoes['estado_10'], \\\n self._estados_reforcos['estado_10'], self._estados_finalizado['estado_1']\n elif self._valor_estado in [5, 9, 13] and acao == 0:\n return self._estados_texto['estado_12'], self._acao_textos['estado_12'], self._acao_dimensoes['estado_12'], \\\n 
self._estados_reforcos['estado_12'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 0:\n return self._estados_texto['estado_13'], self._acao_textos['estado_13'], self._acao_dimensoes['estado_13'], \\\n self._estados_reforcos['estado_13'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 12 and acao == 1:\n return self._estados_texto['estado_final'], self._acao_textos['estado_final'], self._acao_dimensoes[\n 'estado_final'], self._estados_reforcos['estado_final'], self._estados_finalizado['estado_final']\n elif self._valor_estado == 9 and acao == 3:\n return self._estados_texto['estado_6'], self._acao_textos['estado_6'], self._acao_dimensoes['estado_6'], \\\n self._estados_reforcos['estado_6'], self._estados_finalizado['estado_1']\n elif self._valor_estado == 6 and acao == 3:\n return self._estados_texto['estado_2'], self._acao_textos['estado_2'], self._acao_dimensoes['estado_2'], \\\n self._estados_reforcos['estado_2'], self._estados_finalizado['estado_1']\n\n", "step-ids": [ 18, 21, 23, 25, 28 ] }
[ 18, 21, 23, 25, 28 ]
#!/usr/bin/env python

# encoding: utf-8

"""
@author: swensun

@github:https://github.com/yunshuipiao

@software: python

@file: encode_decode.py

@desc: string encoding and decoding

@hint:
"""


def encode(strs):
    """Encodes a whitespace-separated string of words into a single
    length-prefixed string.
    :type strs: str
    :rtype: str
    """
    res = ''
    for string in strs.split():
        res += str(len(string)) + ":" + string
    return res

def decode(string):
    strs = []
    i = 0
    while i < len(string):
        index = string.find(":", i)
        # print(index)
        size = int(string[i:index])
        strs.append(string[index + 1: index + 1 + size])
        i = index + 1 + size
    return strs


if __name__ == '__main__':
    strs = "keno is awesome"
    r = encode(strs)
    print(r)
    r = decode(r)
    print(r)
normal
{ "blob_id": "2561db1264fe399db85460e9f32213b70ddf03ff", "index": 1864, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + ':' + string\n return res\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + ':' + string\n return res\n\n\ndef decode(string):\n strs = []\n i = 0\n while i < len(string):\n index = string.find(':', i)\n size = int(string[i:index])\n strs.append(string[index + 1:index + 1 + size])\n i = index + 1 + size\n return strs\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + ':' + string\n return res\n\n\ndef decode(string):\n strs = []\n i = 0\n while i < len(string):\n index = string.find(':', i)\n size = int(string[i:index])\n strs.append(string[index + 1:index + 1 + size])\n i = index + 1 + size\n return strs\n\n\nif __name__ == '__main__':\n strs = 'keno is awesome'\n r = encode(strs)\n print(r)\n r = decode(r)\n print(r)\n", "step-5": "#!/usr/bin/env python\n\n# encoding: utf-8\n\n\"\"\"\n@author: swensun\n\n@github:https://github.com/yunshuipiao\n\n@software: python\n\n@file: encode_decode.py\n\n@desc: 字符串编解码\n\n@hint:\n\"\"\"\n\n\ndef encode(strs):\n \"\"\"Encodes a list of strings to a single string.\n :type strs: List[str]\n :rtype: str\n \"\"\"\n res = ''\n for string in strs.split():\n res += str(len(string)) + \":\" + string\n return res\n\ndef decode(string):\n strs = []\n i = 0\n while i < len(string):\n index = string.find(\":\", i)\n # print(index)\n size = int(string[i:index])\n strs.append(string[index + 1: index + 1 + size])\n i = index + 1 + size\n return strs\n\n\nif __name__ == '__main__':\n strs = \"keno is awesome\"\n r = encode(strs)\n print(r)\n r = decode(r)\n print(r)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python # -*- coding: utf-8 -*- """Tests for the Spotlight Volume configuration plist plugin.""" import unittest # pylint: disable=unused-import from plaso.formatters import plist as plist_formatter from plaso.parsers import plist from plaso.parsers.plist_plugins import spotlight_volume from tests.parsers.plist_plugins import test_lib class SpotlightVolumePluginTest(test_lib.PlistPluginTestCase): """Tests for the Spotlight Volume configuration plist plugin.""" def setUp(self): """Makes preparations before running an individual test.""" self._plugin = spotlight_volume.SpotlightVolumePlugin() self._parser = plist.PlistParser() def testProcess(self): """Tests the Process function.""" plist_name = u'VolumeConfiguration.plist' event_queue_consumer = self._ParsePlistFileWithPlugin( self._parser, self._plugin, [plist_name], plist_name) event_objects = self._GetEventObjectsFromQueue(event_queue_consumer) self.assertEqual(len(event_objects), 2) timestamps = [] for event_object in event_objects: timestamps.append(event_object.timestamp) expected_timestamps = frozenset([ 1372139683000000, 1369657656000000]) self.assertTrue(set(timestamps) == expected_timestamps) event_object = event_objects[0] self.assertEqual(event_object.key, u'') self.assertEqual(event_object.root, u'/Stores') expected_desc = (u'Spotlight Volume 4D4BFEB5-7FE6-4033-AAAA-' u'AAAABBBBCCCCDDDD (/.MobileBackups) activated.') self.assertEqual(event_object.desc, expected_desc) expected_string = u'/Stores/ {0:s}'.format(expected_desc) expected_short = expected_string[:77] + u'...' self._TestGetMessageStrings( event_object, expected_string, expected_short) if __name__ == '__main__': unittest.main()
normal
{ "blob_id": "6d1b882af2a027f2eecaa3a881dbcab1e3a3b92b", "index": 9608, "step-1": "<mask token>\n\n\nclass SpotlightVolumePluginTest(test_lib.PlistPluginTestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SpotlightVolumePluginTest(test_lib.PlistPluginTestCase):\n \"\"\"Tests for the Spotlight Volume configuration plist plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Makes preparations before running an individual test.\"\"\"\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()\n\n def testProcess(self):\n \"\"\"Tests the Process function.\"\"\"\n plist_name = u'VolumeConfiguration.plist'\n event_queue_consumer = self._ParsePlistFileWithPlugin(self._parser,\n self._plugin, [plist_name], plist_name)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n self.assertEqual(len(event_objects), 2)\n timestamps = []\n for event_object in event_objects:\n timestamps.append(event_object.timestamp)\n expected_timestamps = frozenset([1372139683000000, 1369657656000000])\n self.assertTrue(set(timestamps) == expected_timestamps)\n event_object = event_objects[0]\n self.assertEqual(event_object.key, u'')\n self.assertEqual(event_object.root, u'/Stores')\n expected_desc = (\n u'Spotlight Volume 4D4BFEB5-7FE6-4033-AAAA-AAAABBBBCCCCDDDD (/.MobileBackups) activated.'\n )\n self.assertEqual(event_object.desc, expected_desc)\n expected_string = u'/Stores/ {0:s}'.format(expected_desc)\n expected_short = expected_string[:77] + u'...'\n self._TestGetMessageStrings(event_object, expected_string,\n expected_short)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SpotlightVolumePluginTest(test_lib.PlistPluginTestCase):\n \"\"\"Tests for the Spotlight Volume configuration plist plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Makes preparations before running an individual test.\"\"\"\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()\n\n def testProcess(self):\n \"\"\"Tests the Process function.\"\"\"\n plist_name = u'VolumeConfiguration.plist'\n event_queue_consumer = self._ParsePlistFileWithPlugin(self._parser,\n self._plugin, [plist_name], plist_name)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n self.assertEqual(len(event_objects), 2)\n timestamps = []\n for event_object in event_objects:\n timestamps.append(event_object.timestamp)\n expected_timestamps = frozenset([1372139683000000, 1369657656000000])\n self.assertTrue(set(timestamps) == expected_timestamps)\n event_object = event_objects[0]\n self.assertEqual(event_object.key, u'')\n self.assertEqual(event_object.root, u'/Stores')\n expected_desc = (\n u'Spotlight Volume 4D4BFEB5-7FE6-4033-AAAA-AAAABBBBCCCCDDDD (/.MobileBackups) activated.'\n )\n self.assertEqual(event_object.desc, expected_desc)\n expected_string = u'/Stores/ {0:s}'.format(expected_desc)\n expected_short = expected_string[:77] + u'...'\n self._TestGetMessageStrings(event_object, expected_string,\n expected_short)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "<mask token>\nimport unittest\nfrom plaso.formatters import plist as plist_formatter\nfrom plaso.parsers import plist\nfrom plaso.parsers.plist_plugins import spotlight_volume\nfrom tests.parsers.plist_plugins import test_lib\n\n\nclass SpotlightVolumePluginTest(test_lib.PlistPluginTestCase):\n \"\"\"Tests for the Spotlight Volume configuration plist plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Makes preparations before running an 
individual test.\"\"\"\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()\n\n def testProcess(self):\n \"\"\"Tests the Process function.\"\"\"\n plist_name = u'VolumeConfiguration.plist'\n event_queue_consumer = self._ParsePlistFileWithPlugin(self._parser,\n self._plugin, [plist_name], plist_name)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n self.assertEqual(len(event_objects), 2)\n timestamps = []\n for event_object in event_objects:\n timestamps.append(event_object.timestamp)\n expected_timestamps = frozenset([1372139683000000, 1369657656000000])\n self.assertTrue(set(timestamps) == expected_timestamps)\n event_object = event_objects[0]\n self.assertEqual(event_object.key, u'')\n self.assertEqual(event_object.root, u'/Stores')\n expected_desc = (\n u'Spotlight Volume 4D4BFEB5-7FE6-4033-AAAA-AAAABBBBCCCCDDDD (/.MobileBackups) activated.'\n )\n self.assertEqual(event_object.desc, expected_desc)\n expected_string = u'/Stores/ {0:s}'.format(expected_desc)\n expected_short = expected_string[:77] + u'...'\n self._TestGetMessageStrings(event_object, expected_string,\n expected_short)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Spotlight Volume configuration plist plugin.\"\"\"\n\nimport unittest\n\n# pylint: disable=unused-import\nfrom plaso.formatters import plist as plist_formatter\nfrom plaso.parsers import plist\nfrom plaso.parsers.plist_plugins import spotlight_volume\n\nfrom tests.parsers.plist_plugins import test_lib\n\n\nclass SpotlightVolumePluginTest(test_lib.PlistPluginTestCase):\n \"\"\"Tests for the Spotlight Volume configuration plist plugin.\"\"\"\n\n def setUp(self):\n \"\"\"Makes preparations before running an individual test.\"\"\"\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()\n\n def testProcess(self):\n \"\"\"Tests the Process function.\"\"\"\n plist_name = u'VolumeConfiguration.plist'\n event_queue_consumer = self._ParsePlistFileWithPlugin(\n self._parser, self._plugin, [plist_name], plist_name)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n\n self.assertEqual(len(event_objects), 2)\n\n timestamps = []\n for event_object in event_objects:\n timestamps.append(event_object.timestamp)\n expected_timestamps = frozenset([\n 1372139683000000, 1369657656000000])\n self.assertTrue(set(timestamps) == expected_timestamps)\n\n event_object = event_objects[0]\n self.assertEqual(event_object.key, u'')\n self.assertEqual(event_object.root, u'/Stores')\n expected_desc = (u'Spotlight Volume 4D4BFEB5-7FE6-4033-AAAA-'\n u'AAAABBBBCCCCDDDD (/.MobileBackups) activated.')\n self.assertEqual(event_object.desc, expected_desc)\n expected_string = u'/Stores/ {0:s}'.format(expected_desc)\n expected_short = expected_string[:77] + u'...'\n self._TestGetMessageStrings(\n event_object, expected_string, expected_short)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 1, 4, 5, 6, 7 ] }
[ 1, 4, 5, 6, 7 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('infile', help='the file list to be processed')
parser.add_argument('-d', '--directory', default='./',
                    help='directory where files are located')
parser.add_argument('-s', '--suffix', default='_EmsRawEvent.root',
                    help='suffix of the input file')
parser.add_argument('--elist_suffix',
                    help='suffix of the file containing the event list, empty if not used')
parser.add_argument('--elist_dir', default='/',
                    help='directory containing the event list')
parser.add_argument('--elist_name', default='ems_rate_elist',
                    help='name of the event list')
<|reserved_special_token_0|>
if args.elist_suffix:
    list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)
    for fin, felist in zip(list_input, list_elist):
        command = [exec_bin, macro, fin, felist, args.elist_dir, args.elist_name]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
else:
    for fin in list_input:
        command = [exec_bin, macro, fin]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
vmc_dir = os.environ['VMCWORKDIR']
macro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')
exec_bin = os.path.join(vmc_dir, 'build/bin/koa_execute')
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='the file list to be processed')
parser.add_argument('-d', '--directory', default='./',
                    help='directory where files are located')
parser.add_argument('-s', '--suffix', default='_EmsRawEvent.root',
                    help='suffix of the input file')
parser.add_argument('--elist_suffix',
                    help='suffix of the file containing the event list, empty if not used')
parser.add_argument('--elist_dir', default='/',
                    help='directory containing the event list')
parser.add_argument('--elist_name', default='ems_rate_elist',
                    help='name of the event list')
args = parser.parse_args()
in_dir = os.path.expanduser(args.directory)
list_input = batch.get_list(args.infile, args.suffix, in_dir)
if args.elist_suffix:
    list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)
    for fin, felist in zip(list_input, list_elist):
        command = [exec_bin, macro, fin, felist, args.elist_dir, args.elist_name]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
else:
    for fin in list_input:
        command = [exec_bin, macro, fin]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
<|reserved_special_token_1|>
import argparse
import os
import subprocess
import batch
vmc_dir = os.environ['VMCWORKDIR']
macro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')
exec_bin = os.path.join(vmc_dir, 'build/bin/koa_execute')
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='the file list to be processed')
parser.add_argument('-d', '--directory', default='./',
                    help='directory where files are located')
parser.add_argument('-s', '--suffix', default='_EmsRawEvent.root',
                    help='suffix of the input file')
parser.add_argument('--elist_suffix',
                    help='suffix of the file containing the event list, empty if not used')
parser.add_argument('--elist_dir', default='/',
                    help='directory containing the event list')
parser.add_argument('--elist_name', default='ems_rate_elist',
                    help='name of the event list')
args = parser.parse_args()
in_dir = os.path.expanduser(args.directory)
list_input = batch.get_list(args.infile, args.suffix, in_dir)
if args.elist_suffix:
    list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)
    for fin, felist in zip(list_input, list_elist):
        command = [exec_bin, macro, fin, felist, args.elist_dir, args.elist_name]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
else:
    for fin in list_input:
        command = [exec_bin, macro, fin]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
<|reserved_special_token_1|>
#!/usr/bin/python

import argparse
import os
import subprocess
import batch

vmc_dir = os.environ['VMCWORKDIR']
macro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')
exec_bin = os.path.join(vmc_dir, 'build/bin/koa_execute')

# arguments definitions
parser = argparse.ArgumentParser()
parser.add_argument("infile", help="the file list to be processed")
parser.add_argument("-d", "--directory",
                    default="./",
                    help="directory where files are located")
parser.add_argument("-s", "--suffix",
                    default="_EmsRawEvent.root",
                    help="suffix of the input file")
parser.add_argument("--elist_suffix",
                    help="suffix of the file containing the event list, empty if not used")
parser.add_argument("--elist_dir",
                    default="/",
                    help="directory containing the event list")
parser.add_argument("--elist_name",
                    default="ems_rate_elist",
                    help="name of the event list")

args = parser.parse_args()

in_dir = os.path.expanduser(args.directory)

# add rec each file in the list
list_input = batch.get_list(args.infile, args.suffix, in_dir)

if args.elist_suffix:
    list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)
    for fin, felist in zip(list_input, list_elist):
        command = [exec_bin, macro, fin, felist, args.elist_dir, args.elist_name]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
else:
    for fin in list_input:
        command = [exec_bin, macro, fin]
        print(command)
        process = subprocess.Popen(command)
        process.wait()
flexible
{ "blob_id": "58c7b405096a5fdc5eeacb5e5f314f2d1bb85af6", "index": 6229, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('infile', help='the file list to be processed')\nparser.add_argument('-d', '--directory', default='./', help=\n 'directory where files are located')\nparser.add_argument('-s', '--suffix', default='_EmsRawEvent.root', help=\n 'suffix of the input file')\nparser.add_argument('--elist_suffix', help=\n 'suffix of the file containing the event list, empty if not used')\nparser.add_argument('--elist_dir', default='/', help=\n 'directory containing the event list')\nparser.add_argument('--elist_name', default='ems_rate_elist', help=\n 'name of the event list')\n<mask token>\nif args.elist_suffix:\n list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)\n for fin, felist in zip(list_input, list_elist):\n command = [exec_bin, macro, fin, felist, args.elist_dir, args.\n elist_name]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\nelse:\n for fin in list_input:\n command = [exec_bin, macro, fin]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\n", "step-3": "<mask token>\nvmc_dir = os.environ['VMCWORKDIR']\nmacro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')\nexec_bin = os.path.join(vmc_dir, 'build/bin/koa_execute')\nparser = argparse.ArgumentParser()\nparser.add_argument('infile', help='the file list to be processed')\nparser.add_argument('-d', '--directory', default='./', help=\n 'directory where files are located')\nparser.add_argument('-s', '--suffix', default='_EmsRawEvent.root', help=\n 'suffix of the input file')\nparser.add_argument('--elist_suffix', help=\n 'suffix of the file containing the event list, empty if not used')\nparser.add_argument('--elist_dir', default='/', help=\n 'directory containing the event list')\nparser.add_argument('--elist_name', default='ems_rate_elist', help=\n 'name of the event list')\nargs = parser.parse_args()\nin_dir = os.path.expanduser(args.directory)\nlist_input = batch.get_list(args.infile, args.suffix, in_dir)\nif args.elist_suffix:\n list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)\n for fin, felist in zip(list_input, list_elist):\n command = [exec_bin, macro, fin, felist, args.elist_dir, args.\n elist_name]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\nelse:\n for fin in list_input:\n command = [exec_bin, macro, fin]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\n", "step-4": "import argparse\nimport os\nimport subprocess\nimport batch\nvmc_dir = os.environ['VMCWORKDIR']\nmacro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')\nexec_bin = os.path.join(vmc_dir, 'build/bin/koa_execute')\nparser = argparse.ArgumentParser()\nparser.add_argument('infile', help='the file list to be processed')\nparser.add_argument('-d', '--directory', default='./', help=\n 'directory where files are located')\nparser.add_argument('-s', '--suffix', default='_EmsRawEvent.root', help=\n 'suffix of the input file')\nparser.add_argument('--elist_suffix', help=\n 'suffix of the file containing the event list, empty if not used')\nparser.add_argument('--elist_dir', default='/', help=\n 'directory containing the event list')\nparser.add_argument('--elist_name', default='ems_rate_elist', help=\n 'name of the event list')\nargs = parser.parse_args()\nin_dir = os.path.expanduser(args.directory)\nlist_input = batch.get_list(args.infile, args.suffix, in_dir)\nif args.elist_suffix:\n 
list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)\n for fin, felist in zip(list_input, list_elist):\n command = [exec_bin, macro, fin, felist, args.elist_dir, args.\n elist_name]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\nelse:\n for fin in list_input:\n command = [exec_bin, macro, fin]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\n", "step-5": "#!/usr/bin/python\n\nimport argparse\nimport os\nimport subprocess\nimport batch\n\nvmc_dir = os.environ['VMCWORKDIR']\nmacro = os.path.join(vmc_dir, 'macro/koala/daq_tasks/export_ems.C')\nexec_bin = os.path.join(vmc_dir,'build/bin/koa_execute')\n\n# arguments definitions\nparser = argparse.ArgumentParser()\nparser.add_argument(\"infile\",help=\"the file list to be processed\")\nparser.add_argument(\"-d\",\"--directory\",\n default=\"./\",\n help=\"directory where files are located\")\nparser.add_argument(\"-s\",\"--suffix\",\n default=\"_EmsRawEvent.root\",\n help=\"suffix of the input file\")\nparser.add_argument(\"--elist_suffix\",\n help=\"suffix of the file containing the event list, empty if not used\")\nparser.add_argument(\"--elist_dir\",\n default=\"/\",\n help=\"directory containing the event list\")\nparser.add_argument(\"--elist_name\",\n default=\"ems_rate_elist\",\n help=\"name of the event list\")\n\nargs = parser.parse_args()\n\nin_dir = os.path.expanduser(args.directory)\n\n# add rec each file in the list\nlist_input = batch.get_list(args.infile, args.suffix, in_dir)\n\nif args.elist_suffix:\n list_elist = batch.get_list(args.infile, args.elist_suffix, in_dir)\n for fin, felist in zip(list_input, list_elist):\n command = [exec_bin, macro, fin, felist, args.elist_dir, args.elist_name]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\nelse:\n for fin in list_input:\n command = [exec_bin, macro, fin]\n print(command)\n process = subprocess.Popen(command)\n process.wait()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
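The EMS export script above relies on a local batch.get_list helper that is not part of this record; a plausible sketch of it, assuming the list file holds one run name per line, would be:

import os


def get_list(infile, suffix, in_dir):
    """Build full file paths from a plain-text run list (assumed format)."""
    files = []
    with open(infile) as f:
        for line in f:
            name = line.strip()
            if not name or name.startswith('#'):
                continue  # skip blank lines and comments
            files.append(os.path.join(in_dir, name + suffix))
    return files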
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class ActivityConfig(AppConfig):
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class ActivityConfig(AppConfig):
    name = 'apps.activity'
<|reserved_special_token_1|>
from django.apps import AppConfig


class ActivityConfig(AppConfig):
    name = 'apps.activity'
flexible
{ "blob_id": "2a69aa0cd9d0e39ad82d6a354e956bdad0648797", "index": 2252, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ActivityConfig(AppConfig):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ActivityConfig(AppConfig):\n name = 'apps.activity'\n", "step-4": "from django.apps import AppConfig\n\n\nclass ActivityConfig(AppConfig):\n name = 'apps.activity'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
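For context, an AppConfig like the one above only takes effect once the app is registered in the project settings; a typical (assumed) settings.py entry looks like:

# settings.py (assumed project file, not part of this record)
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'apps.activity.apps.ActivityConfig',  # dotted path to the config class, assuming it lives in apps/activity/apps.py
]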
from . import *
from ..utils.constants import NUM_SEARCH_RESULT


def get_course_by_id(course_id):
    return Course.query.filter_by(id=course_id).first()


def get_course_by_subject_and_course_num(subject_code, course_num):
    return Course.query.filter_by(subject_code=subject_code, course_num=course_num).first()


def create_course(subject_code, course_num, title):
    optional_course = get_course_by_subject_and_course_num(subject_code, course_num)
    if optional_course:
        return optional_course
    course = Course(subject_code=subject_code, course_num=course_num, title=title)
    db.session.add(course)
    db.session.commit()
    return course


def search_courses(query):
    results = Course.query.filter(Course.search_string.ilike("%{}%".format(query))).limit(NUM_SEARCH_RESULT)
    results = sorted(results, key=lambda r: find_query_index(r, query))
    return results


def find_query_index(course, key):
    try:
        return course.search_string.lower().index(key.lower())
    except ValueError:
        return -1


def clear_table():
    Course.query.delete()
normal
{ "blob_id": "b3f0aae91c885d0e15ff3e456b5cab43fca65b67", "index": 4184, "step-1": "<mask token>\n\n\ndef get_course_by_id(course_id):\n return Course.query.filter_by(id=course_id).first()\n\n\n<mask token>\n\n\ndef create_course(subject_code, course_num, title):\n optional_course = get_course_by_subject_and_course_num(subject_code,\n course_num)\n if optional_course:\n return optional_course\n course = Course(subject_code=subject_code, course_num=course_num, title\n =title)\n db.session.add(course)\n db.session.commit()\n return course\n\n\ndef search_courses(query):\n results = Course.query.filter(Course.search_string.ilike('%{}%'.format(\n query))).limit(NUM_SEARCH_RESULT)\n results = sorted(results, key=lambda r: find_query_index(r, query))\n return results\n\n\n<mask token>\n\n\ndef clear_table():\n Course.query.delete()\n", "step-2": "<mask token>\n\n\ndef get_course_by_id(course_id):\n return Course.query.filter_by(id=course_id).first()\n\n\n<mask token>\n\n\ndef create_course(subject_code, course_num, title):\n optional_course = get_course_by_subject_and_course_num(subject_code,\n course_num)\n if optional_course:\n return optional_course\n course = Course(subject_code=subject_code, course_num=course_num, title\n =title)\n db.session.add(course)\n db.session.commit()\n return course\n\n\ndef search_courses(query):\n results = Course.query.filter(Course.search_string.ilike('%{}%'.format(\n query))).limit(NUM_SEARCH_RESULT)\n results = sorted(results, key=lambda r: find_query_index(r, query))\n return results\n\n\ndef find_query_index(course, key):\n try:\n return course.search_string.lower().index(key.lower())\n except ValueError:\n return -1\n\n\ndef clear_table():\n Course.query.delete()\n", "step-3": "<mask token>\n\n\ndef get_course_by_id(course_id):\n return Course.query.filter_by(id=course_id).first()\n\n\ndef get_course_by_subject_and_course_num(subject_code, course_num):\n return Course.query.filter_by(subject_code=subject_code, course_num=\n course_num).first()\n\n\ndef create_course(subject_code, course_num, title):\n optional_course = get_course_by_subject_and_course_num(subject_code,\n course_num)\n if optional_course:\n return optional_course\n course = Course(subject_code=subject_code, course_num=course_num, title\n =title)\n db.session.add(course)\n db.session.commit()\n return course\n\n\ndef search_courses(query):\n results = Course.query.filter(Course.search_string.ilike('%{}%'.format(\n query))).limit(NUM_SEARCH_RESULT)\n results = sorted(results, key=lambda r: find_query_index(r, query))\n return results\n\n\ndef find_query_index(course, key):\n try:\n return course.search_string.lower().index(key.lower())\n except ValueError:\n return -1\n\n\ndef clear_table():\n Course.query.delete()\n", "step-4": "from . 
import *\nfrom ..utils.constants import NUM_SEARCH_RESULT\n\n\ndef get_course_by_id(course_id):\n return Course.query.filter_by(id=course_id).first()\n\n\ndef get_course_by_subject_and_course_num(subject_code, course_num):\n return Course.query.filter_by(subject_code=subject_code, course_num=\n course_num).first()\n\n\ndef create_course(subject_code, course_num, title):\n optional_course = get_course_by_subject_and_course_num(subject_code,\n course_num)\n if optional_course:\n return optional_course\n course = Course(subject_code=subject_code, course_num=course_num, title\n =title)\n db.session.add(course)\n db.session.commit()\n return course\n\n\ndef search_courses(query):\n results = Course.query.filter(Course.search_string.ilike('%{}%'.format(\n query))).limit(NUM_SEARCH_RESULT)\n results = sorted(results, key=lambda r: find_query_index(r, query))\n return results\n\n\ndef find_query_index(course, key):\n try:\n return course.search_string.lower().index(key.lower())\n except ValueError:\n return -1\n\n\ndef clear_table():\n Course.query.delete()\n", "step-5": "from . import *\nfrom ..utils.constants import NUM_SEARCH_RESULT\n\n\ndef get_course_by_id(course_id):\n return Course.query.filter_by(id=course_id).first()\n\n\ndef get_course_by_subject_and_course_num(subject_code, course_num):\n return Course.query.filter_by(subject_code=subject_code, course_num=course_num).first()\n\n\ndef create_course(subject_code, course_num, title):\n optional_course = get_course_by_subject_and_course_num(subject_code, course_num)\n\n if optional_course:\n return optional_course\n\n course = Course(subject_code=subject_code, course_num=course_num, title=title)\n db.session.add(course)\n db.session.commit()\n return course\n\n\ndef search_courses(query):\n results = Course.query.filter(Course.search_string.ilike(\"%{}%\".format(query))).limit(NUM_SEARCH_RESULT)\n results = sorted(results, key = lambda r : find_query_index(r, query))\n return results\n\n\ndef find_query_index(course, key):\n try: \n return course.search_string.lower().index(key.lower()) \n except(ValueError): \n return -1\n\n\ndef clear_table():\n Course.query.delete()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
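A short usage sketch for the course helpers above, assuming an application context and the Course/db objects that `from . import *` is expected to provide:

# create_course is idempotent: it returns the existing row if the course is already stored.
course = create_course('CS', '1110', 'Introduction to Computing')

# search_courses caps results at NUM_SEARCH_RESULT and orders them so that
# courses whose search_string matches the query earliest come first.
matches = search_courses('comput')
for c in matches:
    print(c.subject_code, c.course_num, c.title)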
import requests from bs4 import BeautifulSoup from urllib.request import urlretrieve import json import time #功能一:下载单一歌曲、歌词 def single_song(song_id,path,song_name): #下载单一歌曲,输入为歌曲id,保存路径,歌曲名称 song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id down_path = path +'\\'+ song_name + '.mp3' urlretrieve(song_url,down_path) print("歌曲下载完成:"+song_name) def save2txt(songname, lyric,path): #写进歌词到指定路径,并保存,输入为歌曲名称、歌词信息、保存路径 # print('正在保存歌曲:{}'.format(songname)) print("歌词下载完成:"+songname) lyric_path=path+'\\'+songname+'.txt' with open(lyric_path, 'a', encoding='utf-8')as f: f.write(lyric) def single_song_lyric(song_id,path,song_name): #下载单一歌曲的歌词,输入为歌曲id,保存路径,歌曲名称 url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id) headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'} html = requests.get(url, headers=headers).text json_obj = json.loads(html) initial_lyric = json_obj['lrc']['lyric'] reg = re.compile(r'\[.*\]') lyric = re.sub(reg, '', initial_lyric).strip() save2txt(song_name, lyric, path) time.sleep(1) #功能二:根据歌单url下载 def songs_from_list(url,path): #url:歌单网址;path:本地保存目录 下载某一歌单的所有歌曲(包括歌手页、排行榜) new_url = url.replace('/#', '') header = { 'Host': 'music.163.com', 'Referer': 'https://music.163.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0' } res = requests.get(new_url, headers=header).text r = BeautifulSoup(res, "html.parser") music_dict = {} result = r.find('ul', {'class', 'f-hide'}).find_all('a') for music in result: print(music) music_id = music.get('href').strip('/song?id=') music_name = music.text music_dict[music_id] = music_name for song_id in music_dict: song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id down_path=path+'\\'+music_dict[song_id]+'.mp3' # path = "C:\\Users\\ming-\\Downloads\\%s.mp3" % music_dict[song_id] # 添加数据 print( "正在下载:%s" % music_dict[song_id]) # text.see(END) # text.update() urlretrieve(song_url, down_path) def get_lyrics(songids): #根据歌曲id获取歌词,输入为歌曲Id url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids) headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'} html = requests.get(url, headers=headers).text json_obj = json.loads(html) initial_lyric = json_obj['lrc']['lyric'] reg = re.compile(r'\[.*\]') lyric = re.sub(reg, '', initial_lyric).strip() return lyric def lyrics_from_list(url,path): #根据歌单下载歌曲歌词 new_url = url.replace('/#', '') header = { 'Host': 'music.163.com', 'Referer': 'https://music.163.com/', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0' } res = requests.get(new_url, headers=header).text r = BeautifulSoup(res, "html.parser") music_dict = {} result = r.find('ul', {'class', 'f-hide'}).find_all('a') for music in result: print(music) music_id = music.get('href').strip('/song?id=') music_name = music.text music_dict[music_id] = music_name songids=music_dict.keys() for i in songids: lyric=get_lyrics(i) save2txt(music_dict[i],lyric,path) time.sleep(1) #功能三:根据歌手下载 #获取歌手信息和id from selenium import webdriver from selenium.webdriver.support.ui import WebDriverWait import csv import re # chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe" #chromedriver的文件位置 # browser = webdriver.Chrome(executable_path = 
chrome_driver) # wait = WebDriverWait(browser, 5) # 设置等待时间 def get_singer(url): # 返回歌手名字和歌手id,输入为歌手详情页 chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe" # chromedriver的文件位置 browser = webdriver.Chrome(executable_path=chrome_driver) wait = WebDriverWait(browser, 5) # 设置等待时间 browser.get(url) browser.switch_to.frame('g_iframe') html = browser.page_source soup = BeautifulSoup(html, 'lxml') info = soup.select('.nm.nm-icn.f-thide.s-fc0') singername = [] singerid = [] for snames in info: name = snames.get_text() songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1].split('\'')[0] #正则表达式获取歌曲id singername.append(name) singerid.append(songid) return zip(singername, singerid) def get_data(url): data = [] for singernames, singerids in get_singer(url): info = {} info['歌手名字'] = singernames info['歌手ID'] = singerids data.append(info) return data def save2csv(url): print('保存歌手信息中...请稍后查看') with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f: # CSV 基本写入用 w,追加改模式 w 为 a fieldnames = ['歌手名字', '歌手ID'] writer = csv.DictWriter(f, fieldnames=fieldnames) writer.writeheader() data = get_data(url) print(data) writer.writerows(data) print('保存成功') def download_singer(): idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003] for id in idlist: url = 'https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(id) save2csv(url) def get_id(singer_name): #根据歌手姓名获取对应的歌手id,输入为歌手姓名 file = "lib\\singer_info.csv" with open(file, 'r',encoding='utf-8-sig') as f: reader = csv.reader(f) name = [] id = [] for i in reader: name.append(i[0]) id.append(i[1]) a=name.index(singer_name) return id[a] #根据歌手姓名下载 def get_html(url): #通过代理获取网页信息,输入为指定网页url proxy_addr = {'http': '61.135.217.7:80'} # 用的代理 ip,如果被封或者失效,在http://www.xicidaili.com/换一个 headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'} try: html = requests.get(url, headers=headers, proxies=proxy_addr).text return html except BaseException: print('request error') pass def get_top50(html): #获取热度前50名的歌曲,并返回对应的歌曲名称和歌曲id,输入为歌手详情页 soup = BeautifulSoup(html, 'lxml') info = soup.select('.f-hide #song-list-pre-cache a') songname = [] songids = [] for sn in info: songnames = sn.getText() songname.append(songnames) for si in info: songid = str(re.findall('href="(.*?)"', str(si))).strip().split('=')[-1].split('\'')[0] # 用re查找,查找对象一定要是str类型 songids.append(songid) return zip(songname, songids) def lyrics_from_singername(name,path): #根据歌手姓名下载热度前50名歌曲的歌词 id=get_id(name) top50url = 'https://music.163.com/artist?id={}'.format(id) html = get_html(top50url) singer_infos = get_top50(html) for singer_info in singer_infos: lyric = get_lyrics(singer_info[1]) save2txt(singer_info[0], lyric, path) time.sleep(1) def save_song(songurl, path,songname): #下载指定链接的歌曲,并保存到指定路径,输入为歌曲下载链接、保存路径、歌曲名称 try: urlretrieve(songurl, path) print('歌曲下载完成:' + songname) except BaseException: print('下载失败:' + songname) pass def songs_from_singername(name,path): #根据歌手姓名下载歌曲到指定路径,输入为歌手姓名和保存路径 id=get_id(name) top50url = 'https://music.163.com/artist?id={}'.format(id) html = get_html(top50url) singer_infos = get_top50(html) for singer_info in singer_infos: songid = singer_info[1] songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid) songname = singer_info[0] # path = 'D:\\code_new\\pycharm\\yunmusic\\song' + songname + '.mp3' down_path=path+'\\'+songname+'.mp3' save_song(songurl, down_path,songname) time.sleep(1) def 
lyrics_from_singerid(id,path): #根据歌手id下载歌词,输入为歌手id和本地保存路径 top50url = 'https://music.163.com/artist?id={}'.format(id) html = get_html(top50url) singer_infos = get_top50(html) for singer_info in singer_infos: lyric = get_lyrics(singer_info[1]) save2txt(singer_info[0], lyric, path) time.sleep(1) def songs_from_singerid(id,path): #根据歌手id下载歌曲音频,输入为歌手id和本地保存路径 top50url = 'https://music.163.com/artist?id={}'.format(id) html = get_html(top50url) singer_infos = get_top50(html) for singer_info in singer_infos: songid = singer_info[1] songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid) songname = singer_info[0] # path = 'D:\\code_new\\pycharm\\yunmusic\\song' + songname + '.mp3' down_path = path + '\\' + songname + '.mp3' save_song(songurl, down_path, songname) time.sleep(1) #功能四:下载mv import requests import os import sys from urllib.parse import urlparse,parse_qs def http_get(api): my_cookie = { "version":0, "name":'appver', "value":'1.5.0.75771', "port":None, # "port_specified":False, "domain":'www.mydomain.com', # "domain_specified":False, # "domain_initial_dot":False, "path":'/', # "path_specified":True, "secure":False, "expires":None, "discard":True, "comment":None, "comment_url":None, "rest":{}, "rfc2109":False } s = requests.Session() s.headers.update({'Referer': "http://music.163.com/"}) s.cookies.set(**my_cookie) response = s.get(api) json_data = json.loads(response.text) return json_data def download_single_mv(id): #根据mvid下载 size = "720" #default 720p api = "http://music.163.com/api/mv/detail?id="+str(id)+"&type=mp4" json_data = http_get(api) if json_data["code"]==200: a = list(json_data["data"]["brs"].keys()) if size not in a: size = a[0] #如果没有720p,则选择最小的版本 mvurl = json_data["data"]["brs"][size] #mv网址 artist = json_data["data"]["artistName"] #歌手信息 song = json_data["data"]["name"] #歌曲信息 filename = '%s/[%s]%s.mp4' %(artist,size,song) if os.path.exists(filename)==False: if os.path.exists(artist)==False: os.makedirs(artist) def reporthook(blocknum, blocksize, totalsize): readsofar = blocknum * blocksize if totalsize > 0: percent = readsofar * 1e2 / totalsize s = "\r%5.1f%% %*d / %d" % ( percent, len(str(totalsize)), readsofar, totalsize) sys.stderr.write(s) if readsofar >= totalsize: # near the end sys.stderr.write("\n") else: # total size is unknown sys.stderr.write("read %d\n" % (readsofar,)) print("downloading "+filename) urlretrieve(mvurl,filename,reporthook) def download_mv_from_list(url): #批量下载歌单的mv资源 input=url.replace("#","") id = parse_qs(urlparse(input).query)["id"][0] if "playlist" in input: playlist_api = "http://music.163.com/api/playlist/detail?id=%s" % (id) json_data = http_get(playlist_api) for idx, mv in enumerate(json_data["result"]["tracks"]): #mv信息 download_single_mv(mv["mvid"]) print("downloaded:" + str(idx)) elif "album" in input: playlist_api = "http://music.163.com/api/album/%s" % (id) json_data = http_get(playlist_api) for idx, mv in enumerate(json_data["album"]["songs"]): if mv["mvid"] != None and mv["mvid"] != 0: download_single_mv(mv["mvid"]) print("downloaded:" + str(idx)) download_single_mv(id) #功能五:爬取歌曲评论并生成词云图 from jieba import posseg from PIL import Image import matplotlib.pyplot as plt import numpy as np import wordcloud def _content_generator(music_id): #根据歌曲id获取评论信息 url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 
'Cache-Control': 'max-age=0', 'Host': 'music.163.com', 'Proxy-Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1', 'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36', } limit = 20 offset = 0 compiler = re.compile(r'[^\u4E00-\u9FA5^\u3000-\u303F^\uFF00-\uFFEF^0-9^a-z^A-Z]') while True: params = { 'limit': limit, 'offset': offset, } offset += limit r = requests.get(url, headers=headers, params=params) comments = r.json()['comments'] has_more = r.json()['more'] for t in comments: yield compiler.subn('', t['content'])[0] if not has_more: break class WangYiMusicWordCloud: #自定义类,生成词云图 stop_words = ['首歌'] def __init__(self, music_id, mask=None, font_path=None, stop_words=None): self.music_id = music_id #歌曲信息 self.mask = mask #背景图片 self.font_path = font_path #字体 if not stop_words is None: self.stop_words+=stop_words self.img_wordcloud = None def _cut_word(self, comment): #分词 word_pairs = posseg.lcut(comment, HMM=False) result = [] for t in word_pairs: if not (t.word in result or t.word in self.stop_words): result.append(t.word) return '/'.join(result) def get_words_text(self): #若已有评论文件则读取,若没有则爬取评论并保存 if os.path.isfile(f'{self.music_id}.txt'): print('评论文件已存在,读取文件...') with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f: return f.read() else: print('没有默认评论文件,开始爬取评论...') count = 0 text = [] comments = _content_generator(self.music_id) for t in comments: text.append(self._cut_word(t)) count += 1 print(f'\r已爬取 {count}条评论', end='') if count % 100 == 0: print(f'\r已爬取 {count}条评论, 休息 2s', end='') time.sleep(2) str_text = '\n'.join(text) with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f: f.write(str_text) print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt') return str_text def generate(self, **kwargs): default_kwargs = { 'background_color': "white", 'width': 1000, 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords': wordcloud.STOPWORDS, } if not self.mask is None: default_kwargs['mask'] = np.array(Image.open(self.mask)) if not self.font_path is None: default_kwargs['font_path'] = self.font_path elif 'font_path' not in kwargs: raise ValueError('缺少参数 font_path') default_kwargs.update(kwargs) str_text = self.get_words_text() self.wordcloud = wordcloud.WordCloud(**default_kwargs) self.img_wordcloud = self.wordcloud.generate(str_text) def show_wordcloud(self): #生成词云图 if 
self.img_wordcloud is None: self.generate() plt.axis('off') plt.imshow(self.img_wordcloud) plt.show() def to_file(self, filename): #保存到本地 if not hasattr(self, 'wordcloud'): self.generate() self.wordcloud.to_file(filename) def get_wordcloud(music_id,mask,font,path): #执行函数 wordcloud_obj = WangYiMusicWordCloud(music_id, mask=mask, font_path=font) wordcloud_obj.show_wordcloud() result=path+'\\'+'result.jpg' wordcloud_obj.to_file(result)
normal
{ "blob_id": "3b11d514b15775e4c818a7a2adf9a80e89dca968", "index": 5801, "step-1": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= 
totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n 
default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return 
html\n except BaseException:\n print('request error')\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; 
WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 
'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\ndef get_top50(html):\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.f-hide #song-list-pre-cache a')\n songname = []\n songids = []\n for sn in info:\n songnames = sn.getText()\n songname.append(songnames)\n for si in info:\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1\n ].split(\"'\")[0]\n songids.append(songid)\n return zip(songname, songids)\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n 
import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import json
import time

# Feature 1: download a single song and its lyrics

def single_song(song_id, path, song_name):  # download one song; args: song id, save path, song name
    song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
    down_path = path + '\\' + song_name + '.mp3'
    urlretrieve(song_url, down_path)
    print("歌曲下载完成:" + song_name)

def save2txt(songname, lyric, path):  # write the lyric text to the given path; args: song name, lyric text, save path
    # print('正在保存歌曲:{}'.format(songname))
    print("歌词下载完成:" + songname)
    lyric_path = path + '\\' + songname + '.txt'
    with open(lyric_path, 'a', encoding='utf-8') as f:
        f.write(lyric)

def single_song_lyric(song_id, path, song_name):  # download the lyric of one song; args: song id, save path, song name
    url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id)
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    html = requests.get(url, headers=headers).text
    json_obj = json.loads(html)
    initial_lyric = json_obj['lrc']['lyric']
    reg = re.compile(r'\[.*\]')
    lyric = re.sub(reg, '', initial_lyric).strip()
    save2txt(song_name, lyric, path)
    time.sleep(1)


# Feature 2: download from a playlist url

def songs_from_list(url, path):  # url: playlist page; path: local folder; downloads every song of a playlist (artist pages and charts work too)
    new_url = url.replace('/#', '')

    header = {
        'Host': 'music.163.com',
        'Referer': 'https://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }

    res = requests.get(new_url, headers=header).text

    r = BeautifulSoup(res, "html.parser")
    music_dict = {}
    result = r.find('ul', {'class', 'f-hide'}).find_all('a')
    for music in result:
        print(music)
        music_id = music.get('href').strip('/song?id=')
        music_name = music.text
        music_dict[music_id] = music_name
    for song_id in music_dict:
        song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
        down_path = path + '\\' + music_dict[song_id] + '.mp3'

        # path = "C:\\Users\\ming-\\Downloads\\%s.mp3" % music_dict[song_id]

        print("正在下载:%s" % music_dict[song_id])
        # text.see(END)
        # text.update()

        urlretrieve(song_url, down_path)

def get_lyrics(songids):  # fetch the lyric text for one song id
    url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids)
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    html = requests.get(url, headers=headers).text
    json_obj = json.loads(html)
    initial_lyric = json_obj['lrc']['lyric']
    reg = re.compile(r'\[.*\]')
    lyric = re.sub(reg, '', initial_lyric).strip()
    return lyric

def lyrics_from_list(url, path):  # download the lyrics of every song in a playlist
    new_url = url.replace('/#', '')

    header = {
        'Host': 'music.163.com',
        'Referer': 'https://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }

    res = requests.get(new_url, headers=header).text

    r = BeautifulSoup(res, "html.parser")
    music_dict = {}
    result = r.find('ul', {'class', 'f-hide'}).find_all('a')
    for music in result:
        print(music)
        music_id = music.get('href').strip('/song?id=')
        music_name = music.text
        music_dict[music_id] = music_name
    songids = music_dict.keys()
    for i in songids:
        lyric = get_lyrics(i)
        save2txt(music_dict[i], lyric, path)
        time.sleep(1)


# Feature 3: download by artist

# collect artist names and ids
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import csv
import re
# chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe"  # location of the chromedriver binary
# browser = webdriver.Chrome(executable_path = chrome_driver)
# wait = WebDriverWait(browser, 5)  # wait timeout
def get_singer(url):  # return artist names and ids from an artist listing page
    chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe"  # location of the chromedriver binary
    browser = webdriver.Chrome(executable_path=chrome_driver)
    wait = WebDriverWait(browser, 5)  # wait timeout
    browser.get(url)
    browser.switch_to.frame('g_iframe')
    html = browser.page_source
    soup = BeautifulSoup(html, 'lxml')
    info = soup.select('.nm.nm-icn.f-thide.s-fc0')
    singername = []
    singerid = []
    for snames in info:
        name = snames.get_text()
        songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1].split('\'')[0]  # extract the id with a regular expression
        singername.append(name)
        singerid.append(songid)
    return zip(singername, singerid)

def get_data(url):
    data = []
    for singernames, singerids in get_singer(url):
        info = {}
        info['歌手名字'] = singernames
        info['歌手ID'] = singerids
        data.append(info)
    return data

def save2csv(url):
    print('保存歌手信息中...请稍后查看')
    with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f:
        # basic CSV writing uses mode 'w'; 'a' appends instead of overwriting
        fieldnames = ['歌手名字', '歌手ID']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        data = get_data(url)
        print(data)
        writer.writerows(data)
        print('保存成功')

def download_singer():
    idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003]
    for id in idlist:
        url = 'https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(id)
        save2csv(url)

def get_id(singer_name):  # look up an artist id by artist name
    file = "lib\\singer_info.csv"
    with open(file, 'r', encoding='utf-8-sig') as f:
        reader = csv.reader(f)
        name = []
        id = []
        for i in reader:
            name.append(i[0])
            id.append(i[1])
        a = name.index(singer_name)
        return id[a]


# download by artist name
def get_html(url):  # fetch a page through the proxy
    proxy_addr = {'http': '61.135.217.7:80'}
    # proxy ip; replace it via http://www.xicidaili.com/ if it gets blocked or expires
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    try:
        html = requests.get(url, headers=headers, proxies=proxy_addr).text
        return html
    except BaseException:
        print('request error')
        pass

def get_top50(html):  # return the names and ids of the 50 hottest songs from an artist page
    soup = BeautifulSoup(html, 'lxml')
    info = soup.select('.f-hide #song-list-pre-cache a')
    songname = []
    songids = []
    for sn in info:
        songnames = sn.getText()
        songname.append(songnames)
    for si in info:
        songid = str(re.findall('href="(.*?)"', str(si))).strip().split('=')[-1].split('\'')[0]  # re works on str, so cast before matching
        songids.append(songid)
    return zip(songname, songids)

def lyrics_from_singername(name, path):  # download the lyrics of an artist's top-50 songs, by artist name
    id = get_id(name)
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)
    for singer_info in singer_infos:
        lyric = get_lyrics(singer_info[1])
        save2txt(singer_info[0], lyric, path)
        time.sleep(1)

def save_song(songurl, path, songname):  # download one song url and save it to the given path
    try:
        urlretrieve(songurl, path)
        print('歌曲下载完成:' + songname)
    except BaseException:
        print('下载失败:' + songname)
        pass

def songs_from_singername(name, path):  # download an artist's top-50 songs, by artist name
    id = get_id(name)
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)
    for singer_info in singer_infos:
        songid = singer_info[1]
        songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)
        songname = singer_info[0]
        # path = 'D:\\code_new\\pycharm\\yunmusic\\song' + songname + '.mp3'
        down_path = path + '\\' + songname + '.mp3'
        save_song(songurl, down_path, songname)
        time.sleep(1)

def lyrics_from_singerid(id, path):  # download lyrics of the top-50 songs, by artist id
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)
    for singer_info in singer_infos:
        lyric = get_lyrics(singer_info[1])
        save2txt(singer_info[0], lyric, path)
        time.sleep(1)

def songs_from_singerid(id, path):  # download audio of the top-50 songs, by artist id
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)
    for singer_info in singer_infos:
        songid = singer_info[1]
        songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)
        songname = singer_info[0]
        # path = 'D:\\code_new\\pycharm\\yunmusic\\song' + songname + '.mp3'
        down_path = path + '\\' + songname + '.mp3'
        save_song(songurl, down_path, songname)
        time.sleep(1)

# Feature 4: download MVs
import requests
import os
import sys
from urllib.parse import urlparse, parse_qs

def http_get(api):
    my_cookie = {
        "version": 0,
        "name": 'appver',
        "value": '1.5.0.75771',
        "port": None,
        # "port_specified": False,
        "domain": 'www.mydomain.com',
        # "domain_specified": False,
        # "domain_initial_dot": False,
        "path": '/',
        # "path_specified": True,
        "secure": False,
        "expires": None,
        "discard": True,
        "comment": None,
        "comment_url": None,
        "rest": {},
        "rfc2109": False
    }

    s = requests.Session()
    s.headers.update({'Referer': "http://music.163.com/"})
    s.cookies.set(**my_cookie)
    response = s.get(api)
    json_data = json.loads(response.text)
    return json_data

def download_single_mv(id):  # download one MV by mv id
    size = "720"  # default 720p
    api = "http://music.163.com/api/mv/detail?id=" + str(id) + "&type=mp4"
    json_data = http_get(api)
    if json_data["code"] == 200:
        a = list(json_data["data"]["brs"].keys())
        if size not in a:
            size = a[0]  # fall back to the smallest size if 720p is unavailable
        mvurl = json_data["data"]["brs"][size]  # mv url
        artist = json_data["data"]["artistName"]  # artist
        song = json_data["data"]["name"]  # song title

        filename = '%s/[%s]%s.mp4' % (artist, size, song)

        if os.path.exists(filename) == False:
            if os.path.exists(artist) == False:
                os.makedirs(artist)
            def reporthook(blocknum, blocksize, totalsize):
                readsofar = blocknum * blocksize
                if totalsize > 0:
                    percent = readsofar * 1e2 / totalsize
                    s = "\r%5.1f%% %*d / %d" % (
                        percent, len(str(totalsize)), readsofar, totalsize)
                    sys.stderr.write(s)
                    if readsofar >= totalsize:  # near the end
                        sys.stderr.write("\n")
                else:  # total size is unknown
                    sys.stderr.write("read %d\n" % (readsofar,))
            print("downloading " + filename)
            urlretrieve(mvurl, filename, reporthook)

def download_mv_from_list(url):  # download every MV of a playlist or album
    input = url.replace("#", "")
    id = parse_qs(urlparse(input).query)["id"][0]
    if "playlist" in input:
        playlist_api = "http://music.163.com/api/playlist/detail?id=%s" % (id)
        json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["result"]["tracks"]):  # mv info
            download_single_mv(mv["mvid"])
            print("downloaded:" + str(idx))
    elif "album" in input:
        playlist_api = "http://music.163.com/api/album/%s" % (id)
        json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["album"]["songs"]):
            if mv["mvid"] != None and mv["mvid"] != 0:
                download_single_mv(mv["mvid"])
                print("downloaded:" + str(idx))
    download_single_mv(id)


# Feature 5: scrape song comments and build a word cloud
from jieba import posseg
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import wordcloud

def _content_generator(music_id):  # yield cleaned comment texts for one song id
    url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Host': 'music.163.com',
        'Proxy-Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    }
    limit = 20
    offset = 0
    compiler = re.compile(r'[^\u4E00-\u9FA5^\u3000-\u303F^\uFF00-\uFFEF^0-9^a-z^A-Z]')

    while True:
        params = {
            'limit': limit,
            'offset': offset,
        }
        offset += limit
        r = requests.get(url, headers=headers, params=params)
        comments = r.json()['comments']
        has_more = r.json()['more']

        for t in comments:
            yield compiler.subn('', t['content'])[0]

        if not has_more:
            break


class WangYiMusicWordCloud:  # helper class that builds the word cloud
    stop_words = ['首歌']

    def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
        self.music_id = music_id  # song id
        self.mask = mask  # mask image
        self.font_path = font_path  # font file

        if not stop_words is None:
            self.stop_words += stop_words

        self.img_wordcloud = None

    def _cut_word(self, comment):  # word segmentation
        word_pairs = posseg.lcut(comment, HMM=False)
        result = []
        for t in word_pairs:
            if not (t.word in result or t.word in self.stop_words):
                result.append(t.word)
        return '/'.join(result)

    def get_words_text(self):  # reuse a cached comment file if present, otherwise scrape and save it
        if os.path.isfile(f'{self.music_id}.txt'):
            print('评论文件已存在,读取文件...')
            with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
                return f.read()
        else:
            print('没有默认评论文件,开始爬取评论...')
            count = 0
            text = []
            comments = _content_generator(self.music_id)
            for t in comments:
                text.append(self._cut_word(t))

                count += 1
                print(f'\r已爬取 {count}条评论', end='')
                if count % 100 == 0:
                    print(f'\r已爬取 {count}条评论, 休息 2s', end='')
                    time.sleep(2)

            str_text = '\n'.join(text)
            with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
                f.write(str_text)
            print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
            return str_text

    def generate(self, **kwargs):
        default_kwargs = {
            'background_color': "white",
            'width': 1000,
            'height': 860,
            'margin': 2,
            'max_words': 50,
            'stopwords': wordcloud.STOPWORDS,
        }
        if not self.mask is None:
            default_kwargs['mask'] = np.array(Image.open(self.mask))
        if not self.font_path is None:
            default_kwargs['font_path'] = self.font_path
        elif 'font_path' not in kwargs:
            raise ValueError('缺少参数 font_path')
        default_kwargs.update(kwargs)

        str_text = self.get_words_text()
        self.wordcloud = wordcloud.WordCloud(**default_kwargs)
        self.img_wordcloud = self.wordcloud.generate(str_text)

    def show_wordcloud(self):  # render the word cloud
        if self.img_wordcloud is None:
            self.generate()

        plt.axis('off')
        plt.imshow(self.img_wordcloud)
        plt.show()

    def to_file(self, filename):  # save to disk
        if not hasattr(self, 'wordcloud'):
            self.generate()
        self.wordcloud.to_file(filename)

def get_wordcloud(music_id, mask, font, path):  # convenience entry point
    wordcloud_obj = WangYiMusicWordCloud(music_id, mask=mask, font_path=font)
    wordcloud_obj.show_wordcloud()
    result = path + '\\' + 'result.jpg'
    wordcloud_obj.to_file(result)
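# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module).  The helpers above are plain
# module-level functions, so they can be driven from a __main__ block such as
# the one below.  Every id, name and directory here is a hypothetical
# placeholder; lyrics_from_singername()/songs_from_singername() additionally
# assume lib\singer_info.csv has already been produced by download_singer().
if __name__ == '__main__':
    download_dir = 'D:\\music_downloads'  # hypothetical output folder

    # Feature 1: one song and its lyric, addressed by song id
    single_song('12345', download_dir, 'example_song')
    single_song_lyric('12345', download_dir, 'example_song')

    # Feature 2: every song / lyric of a playlist page
    songs_from_list('https://music.163.com/#/playlist?id=12345', download_dir)
    lyrics_from_list('https://music.163.com/#/playlist?id=12345', download_dir)

    # Feature 3: top-50 songs / lyrics of one artist, by name or by artist id
    lyrics_from_singername('某歌手', download_dir)
    songs_from_singerid('12345', download_dir)

    # Feature 4: a single MV by mv id, or all MVs of a playlist / album url
    download_single_mv(12345)
    download_mv_from_list('https://music.163.com/#/playlist?id=12345')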
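# ---------------------------------------------------------------------------
# Word-cloud usage sketch (not part of the original module).  get_wordcloud()
# wraps the WangYiMusicWordCloud class defined above; the music id, the mask
# image and the output folder are hypothetical placeholders, and a font file
# covering Chinese glyphs must be supplied because the comments are Chinese.
if __name__ == '__main__':
    get_wordcloud(music_id='12345',
                  mask='mask.png',                        # background shape image, assumed to exist
                  font='C:\\Windows\\Fonts\\simhei.ttf',  # any CJK-capable font file
                  path='D:\\music_downloads')             # result.jpg is written into this folder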
from tkinter import ttk
import tkinter as tk
import pyodbc
#ConnectingDatabase#
from tkinter import messagebox

conn = pyodbc.connect('Driver={SQL Server};'
                      'Server=MUTHUCOMPUTER;'
                      'Database=Class4c v1;'
                      'Trusted_Connection=yes;')
cursor = conn.cursor()

#Adding new record#
def save():
    Names = Name.get()
    Ages = Age.get()
    Genders = Gender.get()
    Heights = height.get()
    weights = weight.get()
    rollnos = StudentId.get()
    Sports = Sport.get()
    cursor.execute("""
                INSERT INTO Students(Name, Age, Gender, Height, _Weight, StudentId)
                VALUES (?,?,?,?,?,?)""", (Names, Ages, Genders, Heights, weights, rollnos))
    conn.commit()
    cursor.execute("""
                INSERT INTO Activity(Name, StudentId, Activity)
                VALUES (?,?,?)
                """, (Names, rollnos, Sports))
    conn.commit()
    clearfields()
    messagebox.showinfo("Tkinter", "Saved successfully!")

#deleting selected record, currently works only with StudentId
def delete():
    x = StudentId.get()
    cursor.execute("""
                DELETE FROM Students
                WHERE StudentId = (?)""", (x))
    conn.commit()
    cursor.execute("""
                DELETE FROM Activity
                WHERE StudentId = (?)""", (x))
    conn.commit()
    clearfields()
    messagebox.showinfo("Tkinter", "Deleted successfully!")

#Searching records
def Search():
    Names = Name.get()
    Ages = Age.get()
    Genders = Gender.get()
    Heights = height.get()
    Weights = weight.get()
    Rollnos = StudentId.get()
    Sports = Sport.get()
    # clearing the tree
    t = tree.get_children()
    for f in t:
        tree.delete(f)
    #Search starts
    if len(Names) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)""", (Names))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    elif len(Ages) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where A.Age like(?)""", (Ages))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    elif len(Genders) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where A.Gender like(?)""", (Genders))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    elif len(Heights) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where A.Height like(?)""", (Heights))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    elif len(Weights) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where A._Weight like(?)""", (Weights))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    elif len(Rollnos) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where A.StudentId like(?)""", (Rollnos))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    elif len(Sports) != 0:
        cursor.execute("""select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity
            from Students A inner join Activity B on A.StudentId=B.StudentId where B.Activity like(?)""", (Sports))
        records = cursor.fetchall()
        for row in records:
            tree.insert("", 3, text=row[0], values=(row[1], row[2], row[3], row[4], row[5], row[6]))
            tree.pack(side=tk.TOP, fill=tk.X)
    else:
        messagebox.showinfo("Tkinter", "At least one search criteria must be given!")
    #Search ends

# function to clear all entry fields
def clearfields():
    Name.delete(0, tk.END)
    Age.delete(0, tk.END)
    Gender.delete(0, tk.END)
    height.delete(0, tk.END)
    weight.delete(0, tk.END)
    StudentId.delete(0, tk.END)
    Sport.delete(0, tk.END)

# defining the canvas
root = tk.Tk()
canvas1 = tk.Canvas(root, width=900, height=300)
canvas1.pack()

# Defining the fields and labels and validating
Name = tk.Entry(root)
canvas1.create_window(300, 10, window=Name)
label1 = tk.Label(root, text='Name:')
label1.config(font=('helvetica', 10))
canvas1.create_window(200, 10, window=label1)

Age = tk.Entry(root)
canvas1.create_window(300, 40, window=Age)
label2 = tk.Label(root, text='Age:')
label2.config(font=('helvetica', 10))
canvas1.create_window(200, 40, window=label2)

Gender = tk.Entry(root)
canvas1.create_window(300, 70, window=Gender)
label3 = tk.Label(root, text='Gender:')
label3.config(font=('helvetica', 10))
canvas1.create_window(200, 70, window=label3)

height = tk.Entry(root)
canvas1.create_window(300, 100, window=height)
label4 = tk.Label(root, text='height in cm:')
label4.config(font=('helvetica', 10))
canvas1.create_window(200, 100, window=label4)

weight = tk.Entry(root)
canvas1.create_window(300, 130, window=weight)
label5 = tk.Label(root, text='weight in kg:')
label5.config(font=('helvetica', 10))
canvas1.create_window(200, 130, window=label5)

StudentId = tk.Entry(root)
canvas1.create_window(300, 160, window=StudentId)
label6 = tk.Label(root, text='StudentId:')
label6.config(font=('helvetica', 10))
canvas1.create_window(200, 160, window=label6)

Sport = tk.Entry(root)
canvas1.create_window(300, 190, window=Sport)
label7 = tk.Label(root, text='Sport:')
label7.config(font=('helvetica', 10))
canvas1.create_window(200, 190, window=label7)

# Defining the buttons
button1 = tk.Button(text='Save', command=save)
canvas1.create_window(500, 250, window=button1)
button5 = tk.Button(text='Search', command=Search)
canvas1.create_window(400, 250, window=button5)
button3 = tk.Button(text='delete', command=delete)
canvas1.create_window(450, 250, window=button3)

# Defining the tree
tree = ttk.Treeview(root)
tree["columns"] = ("one", "two", "three", "four", "five", "six")
tree.column("#0", width=130, minwidth=270, stretch=tk.NO)
tree.column("one", width=100, minwidth=150, stretch=tk.NO)
tree.column("two", width=100, minwidth=100)
tree.column("three", width=100, minwidth=50, stretch=tk.NO)
tree.column("four", width=100, minwidth=50, stretch=tk.NO)
tree.column("five", width=100, minwidth=50, stretch=tk.NO)
tree.column("six", width=100, minwidth=50, stretch=tk.NO)
tree.heading("#0", text="Name", anchor=tk.W)
tree.heading("one", text="Age", anchor=tk.W)
tree.heading("two", text="Gender", anchor=tk.W)
tree.heading("three", text="Height", anchor=tk.W)
tree.heading("four", text="Weight", anchor=tk.W)
tree.heading("five", text="StudentId", anchor=tk.W)
tree.heading("six", text="Sports", anchor=tk.W)
tree.pack()
root.mainloop()
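# ---------------------------------------------------------------------------
# Schema sketch (not part of the original program).  The queries above expect
# the "Class4c v1" SQL Server database to contain a Students table and an
# Activity table joined on StudentId.  Only the column names can be read from
# the code; the data types below are assumptions, so adjust them to the real
# schema and run this helper once, separately, before starting the GUI.
def create_tables(connection_string):
    setup_conn = pyodbc.connect(connection_string)
    setup_cur = setup_conn.cursor()
    setup_cur.execute("""
        CREATE TABLE Students (
            Name      NVARCHAR(100),
            Age       INT,
            Gender    NVARCHAR(20),
            Height    FLOAT,
            _Weight   FLOAT,
            StudentId NVARCHAR(20)
        )""")
    setup_cur.execute("""
        CREATE TABLE Activity (
            Name      NVARCHAR(100),
            StudentId NVARCHAR(20),
            Activity  NVARCHAR(100)
        )""")
    setup_conn.commit()
    setup_conn.close()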
flexible
{ "blob_id": "8058ff209af03b7365ffad2a9ce2e2805b548f53", "index": 9927, "step-1": "<mask token>\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in 
records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, 
text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\n<mask token>\ncanvas1.pack()\n<mask token>\ncanvas1.create_window(300, 10, window=Name)\n<mask token>\nlabel1.config(font=('helvetica', 10))\ncanvas1.create_window(200, 10, window=label1)\n<mask token>\ncanvas1.create_window(300, 40, window=Age)\n<mask token>\nlabel2.config(font=('helvetica', 10))\ncanvas1.create_window(200, 40, window=label2)\n<mask token>\ncanvas1.create_window(300, 70, window=Gender)\n<mask token>\nlabel3.config(font=('helvetica', 10))\ncanvas1.create_window(200, 70, window=label3)\n<mask token>\ncanvas1.create_window(300, 100, window=height)\n<mask token>\nlabel4.config(font=('helvetica', 10))\ncanvas1.create_window(200, 100, window=label4)\n<mask token>\ncanvas1.create_window(300, 130, window=weight)\n<mask token>\nlabel5.config(font=('helvetica', 10))\ncanvas1.create_window(200, 130, window=label5)\n<mask token>\ncanvas1.create_window(300, 160, window=StudentId)\n<mask token>\nlabel6.config(font=('helvetica', 10))\ncanvas1.create_window(200, 160, window=label6)\n<mask token>\ncanvas1.create_window(300, 190, window=Sport)\n<mask token>\nlabel7.config(font=('helvetica', 10))\ncanvas1.create_window(200, 190, window=label7)\n<mask token>\ncanvas1.create_window(500, 250, window=button1)\n<mask token>\ncanvas1.create_window(400, 250, window=button5)\n<mask token>\ncanvas1.create_window(450, 250, window=button3)\n<mask token>\ntree.column('#0', width=130, minwidth=270, stretch=tk.NO)\ntree.column('one', width=100, minwidth=150, stretch=tk.NO)\ntree.column('two', width=100, minwidth=100)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.heading('#0', text='Name', anchor=tk.W)\ntree.heading('one', text='Age', anchor=tk.W)\ntree.heading('two', text='Gender', anchor=tk.W)\ntree.heading('three', text='Height', 
anchor=tk.W)\ntree.heading('four', text='Weight', anchor=tk.W)\ntree.heading('five', text='StudentId', anchor=tk.W)\ntree.heading('six', text='Sports', anchor=tk.W)\ntree.pack()\nroot.mainloop()\n", "step-3": "<mask token>\nconn = pyodbc.connect(\n 'Driver={SQL Server};Server=MUTHUCOMPUTER;Database=Class4c v1;Trusted_Connection=yes;'\n )\ncursor = conn.cursor()\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif 
len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\nroot = tk.Tk()\ncanvas1 = tk.Canvas(root, width=900, height=300)\ncanvas1.pack()\nName = tk.Entry(root)\ncanvas1.create_window(300, 10, window=Name)\nlabel1 = tk.Label(root, text='Name:')\nlabel1.config(font=('helvetica', 10))\ncanvas1.create_window(200, 10, window=label1)\nAge = tk.Entry(root)\ncanvas1.create_window(300, 40, window=Age)\nlabel2 = tk.Label(root, text='Age:')\nlabel2.config(font=('helvetica', 10))\ncanvas1.create_window(200, 40, window=label2)\nGender = tk.Entry(root)\ncanvas1.create_window(300, 70, window=Gender)\nlabel3 = tk.Label(root, text='Gender:')\nlabel3.config(font=('helvetica', 10))\ncanvas1.create_window(200, 70, window=label3)\nheight = tk.Entry(root)\ncanvas1.create_window(300, 100, window=height)\nlabel4 = tk.Label(root, text='height in cm:')\nlabel4.config(font=('helvetica', 10))\ncanvas1.create_window(200, 100, window=label4)\nweight = tk.Entry(root)\ncanvas1.create_window(300, 130, window=weight)\nlabel5 = tk.Label(root, text='weight in kg:')\nlabel5.config(font=('helvetica', 10))\ncanvas1.create_window(200, 130, window=label5)\nStudentId = tk.Entry(root)\ncanvas1.create_window(300, 160, window=StudentId)\nlabel6 = tk.Label(root, text='StudentId:')\nlabel6.config(font=('helvetica', 10))\ncanvas1.create_window(200, 160, window=label6)\nSport = tk.Entry(root)\ncanvas1.create_window(300, 190, window=Sport)\nlabel7 = tk.Label(root, text='Sport:')\nlabel7.config(font=('helvetica', 10))\ncanvas1.create_window(200, 190, window=label7)\nbutton1 = tk.Button(text='Save', command=save)\ncanvas1.create_window(500, 250, window=button1)\nbutton5 = tk.Button(text='Search', command=Search)\ncanvas1.create_window(400, 250, window=button5)\nbutton3 = tk.Button(text='delete', command=delete)\ncanvas1.create_window(450, 250, window=button3)\ntree = ttk.Treeview(root)\ntree['columns'] = 'one', 'two', 'three', 'four', 'five', 'six'\ntree.column('#0', width=130, minwidth=270, stretch=tk.NO)\ntree.column('one', width=100, minwidth=150, stretch=tk.NO)\ntree.column('two', width=100, minwidth=100)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.heading('#0', text='Name', anchor=tk.W)\ntree.heading('one', text='Age', anchor=tk.W)\ntree.heading('two', text='Gender', anchor=tk.W)\ntree.heading('three', text='Height', 
anchor=tk.W)\ntree.heading('four', text='Weight', anchor=tk.W)\ntree.heading('five', text='StudentId', anchor=tk.W)\ntree.heading('six', text='Sports', anchor=tk.W)\ntree.pack()\nroot.mainloop()\n", "step-4": "from tkinter import ttk\nimport tkinter as tk\nimport pyodbc\nfrom tkinter import messagebox\nconn = pyodbc.connect(\n 'Driver={SQL Server};Server=MUTHUCOMPUTER;Database=Class4c v1;Trusted_Connection=yes;'\n )\ncursor = conn.cursor()\n\n\ndef save():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n weights = weight.get()\n rollnos = StudentId.get()\n Sports = Sport.get()\n cursor.execute(\n \"\"\"\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\n VALUES (?,?,?,?,?,?)\"\"\"\n , (Names, Ages, Genders, Heights, weights, rollnos))\n conn.commit()\n cursor.execute(\n \"\"\"\n INSERT INTO Activity(Name,StudentId,Activity)\n VALUES (?,?,?)\n \"\"\"\n , (Names, rollnos, Sports))\n conn.commit()\n clearfields()\n messagebox.showinfo('Tkinter', 'Saved successfully!')\n\n\ndef delete():\n x = StudentId.get()\n cursor.execute(\n \"\"\"\n DELETE FROM Students\n WHERE StudentId = (?)\"\"\", x)\n conn.commit()\n cursor.execute(\n \"\"\"\n DELETE FROM Activity\n WHERE StudentId = (?)\"\"\", x)\n clearfields()\n messagebox.showinfo('Tkinter', 'Deleted successfully!')\n\n\ndef Search():\n Names = Name.get()\n Ages = Age.get()\n Genders = Gender.get()\n Heights = height.get()\n Weights = weight.get()\n Rollnos = StudentId.get()\n Sports = Sport.get()\n t = tree.get_children()\n for f in t:\n tree.delete(f)\n if len(Names) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\"\n , Names)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Ages) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\"\n , Ages)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Genders) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\"\n , Genders)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Heights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\"\n , Heights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Weights) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\"\n , Weights)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], 
row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Rollnos) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\"\n , Rollnos)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n elif len(Sports) != 0:\n cursor.execute(\n \"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\"\n , Sports)\n records = cursor.fetchall()\n for row in records:\n tree.insert('', 3, text=row[0], values=(row[1], row[2], row[3],\n row[4], row[5], row[6]))\n tree.pack(side=tk.TOP, fill=tk.X)\n else:\n messagebox.showinfo('Tkinter',\n 'Atleast one search criteria must be given!')\n\n\ndef clearfields():\n Name.delete(0, tk.END)\n Age.delete(0, tk.END)\n Gender.delete(0, tk.END)\n height.delete(0, tk.END)\n weight.delete(0, tk.END)\n StudentId.delete(0, tk.END)\n Sport.delete(0, tk.END)\n\n\nroot = tk.Tk()\ncanvas1 = tk.Canvas(root, width=900, height=300)\ncanvas1.pack()\nName = tk.Entry(root)\ncanvas1.create_window(300, 10, window=Name)\nlabel1 = tk.Label(root, text='Name:')\nlabel1.config(font=('helvetica', 10))\ncanvas1.create_window(200, 10, window=label1)\nAge = tk.Entry(root)\ncanvas1.create_window(300, 40, window=Age)\nlabel2 = tk.Label(root, text='Age:')\nlabel2.config(font=('helvetica', 10))\ncanvas1.create_window(200, 40, window=label2)\nGender = tk.Entry(root)\ncanvas1.create_window(300, 70, window=Gender)\nlabel3 = tk.Label(root, text='Gender:')\nlabel3.config(font=('helvetica', 10))\ncanvas1.create_window(200, 70, window=label3)\nheight = tk.Entry(root)\ncanvas1.create_window(300, 100, window=height)\nlabel4 = tk.Label(root, text='height in cm:')\nlabel4.config(font=('helvetica', 10))\ncanvas1.create_window(200, 100, window=label4)\nweight = tk.Entry(root)\ncanvas1.create_window(300, 130, window=weight)\nlabel5 = tk.Label(root, text='weight in kg:')\nlabel5.config(font=('helvetica', 10))\ncanvas1.create_window(200, 130, window=label5)\nStudentId = tk.Entry(root)\ncanvas1.create_window(300, 160, window=StudentId)\nlabel6 = tk.Label(root, text='StudentId:')\nlabel6.config(font=('helvetica', 10))\ncanvas1.create_window(200, 160, window=label6)\nSport = tk.Entry(root)\ncanvas1.create_window(300, 190, window=Sport)\nlabel7 = tk.Label(root, text='Sport:')\nlabel7.config(font=('helvetica', 10))\ncanvas1.create_window(200, 190, window=label7)\nbutton1 = tk.Button(text='Save', command=save)\ncanvas1.create_window(500, 250, window=button1)\nbutton5 = tk.Button(text='Search', command=Search)\ncanvas1.create_window(400, 250, window=button5)\nbutton3 = tk.Button(text='delete', command=delete)\ncanvas1.create_window(450, 250, window=button3)\ntree = ttk.Treeview(root)\ntree['columns'] = 'one', 'two', 'three', 'four', 'five', 'six'\ntree.column('#0', width=130, minwidth=270, stretch=tk.NO)\ntree.column('one', width=100, minwidth=150, stretch=tk.NO)\ntree.column('two', width=100, minwidth=100)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.column('three', width=100, minwidth=50, stretch=tk.NO)\ntree.heading('#0', text='Name', anchor=tk.W)\ntree.heading('one', text='Age', anchor=tk.W)\ntree.heading('two', text='Gender', 
anchor=tk.W)\ntree.heading('three', text='Height', anchor=tk.W)\ntree.heading('four', text='Weight', anchor=tk.W)\ntree.heading('five', text='StudentId', anchor=tk.W)\ntree.heading('six', text='Sports', anchor=tk.W)\ntree.pack()\nroot.mainloop()\n", "step-5": "from tkinter import ttk\r\nimport tkinter as tk\r\nimport pyodbc\r\n\r\n\r\n#ConnectingDatabase#\r\n\r\nfrom tkinter import messagebox\r\nconn = pyodbc.connect('Driver={SQL Server};'\r\n 'Server=MUTHUCOMPUTER;'\r\n 'Database=Class4c v1;'\r\n 'Trusted_Connection=yes;')\r\ncursor = conn.cursor()\r\n\r\n\r\n#Adding new record#\r\n\r\ndef save():\r\n Names= Name.get()\r\n Ages= Age.get()\r\n Genders= Gender.get()\r\n Heights= height.get()\r\n weights= weight.get()\r\n rollnos= StudentId.get()\r\n Sports=Sport.get()\r\n\r\n cursor.execute(\"\"\"\r\n INSERT INTO Students(Name, Age, Gender, Height,_weight,StudentId)\r\n VALUES (?,?,?,?,?,?)\"\"\",(Names,Ages,Genders,Heights,weights,rollnos))\r\n conn.commit()\r\n cursor.execute(\"\"\"\r\n INSERT INTO Activity(Name,StudentId,Activity)\r\n VALUES (?,?,?)\r\n \"\"\",(Names,rollnos,Sports))\r\n conn.commit()\r\n clearfields()\r\n messagebox.showinfo(\"Tkinter\", \"Saved successfully!\")\r\n\r\n\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n#deleting selected record and currently works only with rollnumber\r\n \r\n \r\ndef delete():\r\n x=StudentId.get()\r\n cursor.execute(\"\"\"\r\n DELETE FROM Students\r\n WHERE StudentId = (?)\"\"\",(x))\r\n conn.commit()\r\n cursor.execute(\"\"\"\r\n DELETE FROM Activity\r\n WHERE StudentId = (?)\"\"\",(x))\r\n clearfields()\r\n messagebox.showinfo(\"Tkinter\", \"Deleted successfully!\")\r\n \r\n\r\n#Searching records \r\n\r\ndef Search():\r\n \r\n Names= Name.get()\r\n Ages= Age.get()\r\n Genders= Gender.get()\r\n Heights= height.get()\r\n Weights= weight.get()\r\n Rollnos= StudentId.get()\r\n Sports=Sport.get()\r\n\r\n# clearing the tree\r\n \r\n t=tree.get_children()\r\n for f in t:\r\n tree.delete(f)\r\n \r\n\r\n#Search starts\r\n \r\n\r\n if len(Names)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Activity B on A.StudentId=B.StudentId where A.Name like(?)\"\"\",(Names))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n \r\n\t\t\r\n elif len(Ages)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Age like(?)\"\"\",(Ages))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Genders)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Gender like(?)\"\"\",(Genders))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Heights)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.Height like(?)\"\"\",(Heights))\r\n records=cursor.fetchall()\r\n for row in 
records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X) \r\n\r\n\r\n elif len(Weights)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A._Weight like(?)\"\"\",(Weights))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Rollnos)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where A.StudentId like(?)\"\"\",(Rollnos))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n\r\n elif len(Sports)!=0:\r\n cursor.execute(\"\"\"select A.Name,A.Age,A.Gender,A.Height,A._Weight,A.StudentId,B.Activity\r\n from Students A inner join Sports B on A.StudentId=B.StudentId where B.Activity like(?)\"\"\",(Sports))\r\n records=cursor.fetchall()\r\n for row in records:\r\n tree.insert(\"\", 3, text=row[0], values=(row[1],row[2],row[3],row[4],row[5],row[6]))\r\n tree.pack(side=tk.TOP,fill=tk.X)\r\n\r\n else:\r\n \r\n messagebox.showinfo(\"Tkinter\", \"Atleast one search criteria must be given!\") \r\n\r\n#Search ends\r\n\r\n# function to clear all entry fields\r\n\r\ndef clearfields():\r\n Name.delete(0 ,tk.END)\r\n Age.delete(0 ,tk.END)\r\n Gender.delete(0 ,tk.END)\r\n height.delete(0 ,tk.END)\r\n weight.delete(0 ,tk.END)\r\n StudentId.delete(0 ,tk.END)\r\n Sport.delete(0 ,tk.END)\r\n \r\n\r\n\r\n \r\n# defining the canvas\r\n\r\nroot= tk.Tk()\r\ncanvas1 = tk.Canvas(root, width = 900, height = 300)\r\ncanvas1.pack()\r\n\r\n# Defining the fields and labels and validating\r\n\r\nName = tk.Entry (root)\r\ncanvas1.create_window(300, 10, window=Name)\r\nlabel1 = tk.Label(root, text='Name:')\r\nlabel1.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 10, window=label1)\r\n\r\n\r\nAge = tk.Entry (root)\r\ncanvas1.create_window(300, 40, window=Age)\r\nlabel2 = tk.Label(root, text='Age:')\r\nlabel2.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 40, window=label2)\r\n\r\nGender = tk.Entry (root)\r\ncanvas1.create_window(300, 70, window=Gender)\r\nlabel3 = tk.Label(root, text='Gender:')\r\nlabel3.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 70, window=label3)\r\n\r\nheight = tk.Entry (root)\r\ncanvas1.create_window(300, 100, window=height)\r\nlabel4 = tk.Label(root, text='height in cm:')\r\nlabel4.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 100, window=label4)\r\n\r\nweight = tk.Entry (root)\r\ncanvas1.create_window(300, 130, window=weight)\r\nlabel5 = tk.Label(root, text='weight in kg:')\r\nlabel5.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 130, window=label5)\r\n\r\nStudentId = tk.Entry (root)\r\ncanvas1.create_window(300, 160, window=StudentId)\r\nlabel6 = tk.Label(root, text='StudentId:')\r\nlabel6.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 160, window=label6)\r\n\r\nSport = tk.Entry (root)\r\ncanvas1.create_window(300, 190, window=Sport)\r\nlabel7 = tk.Label(root, text='Sport:')\r\nlabel7.config(font=('helvetica', 10))\r\ncanvas1.create_window(200, 190, window=label7)\r\n\r\n\r\n# Defining the buttons\r\n\r\nbutton1 = 
tk.Button(text='Save',command = save)\r\ncanvas1.create_window(500, 250, window=button1)\r\n\r\nbutton5 = tk.Button(text='Search',command=Search)\r\ncanvas1.create_window(400, 250, window=button5)\r\n\r\nbutton3 = tk.Button(text='delete',command=delete)\r\ncanvas1.create_window(450, 250, window=button3)\r\n\r\n# Defining the tree\r\n\r\ntree=ttk.Treeview(root)\r\ntree[\"columns\"]=(\"one\",\"two\",\"three\",\"four\",\"five\",\"six\")\r\ntree.column(\"#0\", width=130, minwidth=270, stretch=tk.NO)\r\ntree.column(\"one\", width=100, minwidth=150, stretch=tk.NO)\r\ntree.column(\"two\", width=100, minwidth=100)\r\ntree.column(\"three\", width=100, minwidth=50, stretch=tk.NO)\r\ntree.column(\"three\", width=100, minwidth=50, stretch=tk.NO)\r\ntree.column(\"three\", width=100, minwidth=50, stretch=tk.NO)\r\ntree.heading(\"#0\",text=\"Name\",anchor=tk.W)\r\ntree.heading(\"one\", text=\"Age\",anchor=tk.W)\r\ntree.heading(\"two\", text=\"Gender\",anchor=tk.W)\r\ntree.heading(\"three\", text=\"Height\",anchor=tk.W)\r\ntree.heading(\"four\", text=\"Weight\",anchor=tk.W)\r\ntree.heading(\"five\", text=\"StudentId\",anchor=tk.W)\r\ntree.heading(\"six\", text=\"Sports\",anchor=tk.W)\r\ntree.pack()\r\nroot.mainloop()\r\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from __future__ import division
import numpy as np

table = open("Tables\\table1.txt", "w")

table.write("\\begin{tabular}{|c|c|c|c|} \\hline\n")
table.write("Hidden Neurons & Loss & Training Acc. & Valid. Acc. \\\\ \\hline\n")

H = [1, 5, 10, 11, 12, 20, 40]
for h in H:
    file = open("Out\\out-h" + str(h) + ".txt", "r")
    line = file.readlines()[-1]
    file.close()
    line = line.split(",")
    loss = line[1]
    acc_tr = line[2]
    acc_va = line[3]
    table.write(str(h) + " & " + loss + " & " + acc_tr + " & " + acc_va + " \\\\\n")

table.write("\\hline\n")
table.write("\\end{tabular}")

table.close()
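A small sketch that writes placeholder Out\out-h<N>.txt files so the table script above can be run end to end. The comma-separated layout (an index field followed by loss, training accuracy, validation accuracy) is an assumption inferred from the indices the script reads, and every numeric value below is a placeholder, not a real result.

# Create placeholder result files; only fields 1-3 of each file's last line are read.
import os

for d in ("Out", "Tables"):
    if not os.path.isdir(d):
        os.makedirs(d)

for h in [1, 5, 10, 11, 12, 20, 40]:
    with open("Out\\out-h" + str(h) + ".txt", "w") as f:
        f.write("0,0.900,0.500,0.480\n")    # placeholder early line
        f.write("99,0.123,0.970,0.940\n")   # placeholder final line (the one the script uses)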
normal
{ "blob_id": "3cace66ddf8484d285c2b2a8fabbb83778a2c4af", "index": 4352, "step-1": "<mask token>\n", "step-2": "<mask token>\ntable.write('\\\\begin{tabular}{|c|c|c|c|} \\\\hline\\n')\ntable.write(\n 'Hidden Neurons & Loss & Training Acc. & Valid. Acc. \\\\\\\\ \\\\hline\\n')\n<mask token>\nfor h in H:\n file = open('Out\\\\out-h' + str(h) + '.txt', 'r')\n line = file.readlines()[-1]\n file.close()\n line = line.split(',')\n loss = line[1]\n acc_tr = line[2]\n acc_va = line[3]\n table.write(str(h) + ' & ' + loss + ' & ' + acc_tr + ' & ' + acc_va +\n ' \\\\\\\\\\n')\ntable.write('\\\\hline\\n')\ntable.write('\\\\end{tabular}')\ntable.close()\n", "step-3": "<mask token>\ntable = open('Tables\\\\table1.txt', 'w')\ntable.write('\\\\begin{tabular}{|c|c|c|c|} \\\\hline\\n')\ntable.write(\n 'Hidden Neurons & Loss & Training Acc. & Valid. Acc. \\\\\\\\ \\\\hline\\n')\nH = [1, 5, 10, 11, 12, 20, 40]\nfor h in H:\n file = open('Out\\\\out-h' + str(h) + '.txt', 'r')\n line = file.readlines()[-1]\n file.close()\n line = line.split(',')\n loss = line[1]\n acc_tr = line[2]\n acc_va = line[3]\n table.write(str(h) + ' & ' + loss + ' & ' + acc_tr + ' & ' + acc_va +\n ' \\\\\\\\\\n')\ntable.write('\\\\hline\\n')\ntable.write('\\\\end{tabular}')\ntable.close()\n", "step-4": "from __future__ import division\nimport numpy as np\ntable = open('Tables\\\\table1.txt', 'w')\ntable.write('\\\\begin{tabular}{|c|c|c|c|} \\\\hline\\n')\ntable.write(\n 'Hidden Neurons & Loss & Training Acc. & Valid. Acc. \\\\\\\\ \\\\hline\\n')\nH = [1, 5, 10, 11, 12, 20, 40]\nfor h in H:\n file = open('Out\\\\out-h' + str(h) + '.txt', 'r')\n line = file.readlines()[-1]\n file.close()\n line = line.split(',')\n loss = line[1]\n acc_tr = line[2]\n acc_va = line[3]\n table.write(str(h) + ' & ' + loss + ' & ' + acc_tr + ' & ' + acc_va +\n ' \\\\\\\\\\n')\ntable.write('\\\\hline\\n')\ntable.write('\\\\end{tabular}')\ntable.close()\n", "step-5": "from __future__ import division\nimport numpy as np\n\ntable = open(\"Tables\\\\table1.txt\", \"w\")\n\ntable.write(\"\\\\begin{tabular}{|c|c|c|c|} \\\\hline\\n\")\ntable.write(\"Hidden Neurons & Loss & Training Acc. & Valid. Acc. \\\\\\\\ \\\\hline\\n\")\n\nH = [1,5,10,11,12,20,40]\nfor h in H:\n\tfile = open(\"Out\\\\out-h\"+str(h)+\".txt\", \"r\")\n\tline = file.readlines()[-1]\n\tfile.close()\n\tline = line.split(\",\")\n\tloss = line[1]\n\tacc_tr = line[2]\n\tacc_va = line[3]\n\ttable.write(str(h)+\" & \"+loss+\" & \"+acc_tr+\" & \"+acc_va+\" \\\\\\\\\\n\")\n\ntable.write(\"\\\\hline\\n\")\ntable.write(\"\\\\end{tabular}\")\n\ntable.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class MoneyFst(GraphFst): <|reserved_special_token_0|> def __init__(self, decimal: GraphFst, deterministic: bool=True): super().__init__(name='money', kind='verbalize', deterministic= deterministic) maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') maj_masc = maj_plural_masc | maj_singular_masc maj_fem = maj_plural_fem | maj_singular_fem min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') min_masc = min_plural_masc | min_singular_masc min_fem = min_plural_fem | min_singular_fem fractional_part = pynutil.delete('fractional_part: "' ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') integer_part = pynutil.delete('integer_part: "') + pynini.closure( NEMO_NOT_QUOTE, 1) + pynutil.delete('"') optional_add_and = pynini.closure(pynutil.insert(pynini.union( 'con ', 'y ')), 0, 1) graph_integer_masc = integer_part + NEMO_SPACE + maj_masc graph_integer_fem = shift_cardinal_gender(integer_part ) + NEMO_SPACE + maj_fem graph_integer = graph_integer_fem | graph_integer_masc graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE + pynini.union(optional_add_and + strip_cardinal_apocope( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE + pynini.union(optional_add_and + shift_cardinal_gender( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor = (graph_integer_with_minor_fem | graph_integer_with_minor_masc) graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc graph_decimal_fem = decimal.graph_fem graph_decimal_fem |= decimal.numbers_only_quantity graph_decimal_fem += NEMO_SPACE + maj_fem graph_decimal = graph_decimal_fem | graph_decimal_masc graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), 'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA ) @ graph_decimal graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc + delete_preserve_order) graph_minor_fem = shift_cardinal_gender(fractional_part ) + NEMO_SPACE + min_fem + delete_preserve_order graph_minor = graph_minor_fem | graph_minor_masc graph = (graph_integer | graph_integer_with_minor | graph_decimal | graph_minor) delete_tokens = self.delete_tokens(graph) self.fst = delete_tokens.optimize() <|reserved_special_token_1|> <|reserved_special_token_0|> class MoneyFst(GraphFst): """ Finite state transducer for verbalizing money, e.g. 
money { currency_maj: "euro" integer_part: "un"} -> "un euro" money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques" money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique" Args: decimal: GraphFst deterministic: if True will provide a single transduction option, for False multiple transduction are generated (used for audio-based normalization) """ def __init__(self, decimal: GraphFst, deterministic: bool=True): super().__init__(name='money', kind='verbalize', deterministic= deterministic) maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') maj_masc = maj_plural_masc | maj_singular_masc maj_fem = maj_plural_fem | maj_singular_fem min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') min_masc = min_plural_masc | min_singular_masc min_fem = min_plural_fem | min_singular_fem fractional_part = pynutil.delete('fractional_part: "' ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') integer_part = pynutil.delete('integer_part: "') + pynini.closure( NEMO_NOT_QUOTE, 1) + pynutil.delete('"') optional_add_and = pynini.closure(pynutil.insert(pynini.union( 'con ', 'y ')), 0, 1) graph_integer_masc = integer_part + NEMO_SPACE + maj_masc graph_integer_fem = shift_cardinal_gender(integer_part ) + NEMO_SPACE + maj_fem graph_integer = graph_integer_fem | graph_integer_masc graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE + pynini.union(optional_add_and + strip_cardinal_apocope( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE + pynini.union(optional_add_and + shift_cardinal_gender( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor = (graph_integer_with_minor_fem | graph_integer_with_minor_masc) graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc graph_decimal_fem = decimal.graph_fem graph_decimal_fem |= decimal.numbers_only_quantity graph_decimal_fem += NEMO_SPACE + maj_fem graph_decimal = graph_decimal_fem | graph_decimal_masc graph_decimal = 
pynini.cdrewrite(pynutil.insert(' de'), 'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA ) @ graph_decimal graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc + delete_preserve_order) graph_minor_fem = shift_cardinal_gender(fractional_part ) + NEMO_SPACE + min_fem + delete_preserve_order graph_minor = graph_minor_fem | graph_minor_masc graph = (graph_integer | graph_integer_with_minor | graph_decimal | graph_minor) delete_tokens = self.delete_tokens(graph) self.fst = delete_tokens.optimize() <|reserved_special_token_1|> <|reserved_special_token_0|> try: import pynini from pynini.lib import pynutil fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv') ) masc = pynini.string_file(get_abs_path( 'data/money/currency_plural_masc.tsv')) fem_singular = pynini.project(fem, 'input') masc_singular = pynini.project(masc, 'input') fem_plural = pynini.project(fem, 'output') masc_plural = pynini.project(masc, 'output') PYNINI_AVAILABLE = True except (ModuleNotFoundError, ImportError): fem_plural = None masc_plural = None fem_singular = None masc_singular = None PYNINI_AVAILABLE = False class MoneyFst(GraphFst): """ Finite state transducer for verbalizing money, e.g. money { currency_maj: "euro" integer_part: "un"} -> "un euro" money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques" money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique" Args: decimal: GraphFst deterministic: if True will provide a single transduction option, for False multiple transduction are generated (used for audio-based normalization) """ def __init__(self, decimal: GraphFst, deterministic: bool=True): super().__init__(name='money', kind='verbalize', deterministic= deterministic) maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') maj_masc = maj_plural_masc | maj_singular_masc maj_fem = maj_plural_fem | maj_singular_fem min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') min_masc = min_plural_masc | min_singular_masc min_fem = min_plural_fem | min_singular_fem fractional_part = pynutil.delete('fractional_part: "' ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') integer_part = pynutil.delete('integer_part: "') + pynini.closure( NEMO_NOT_QUOTE, 1) + pynutil.delete('"') optional_add_and = 
pynini.closure(pynutil.insert(pynini.union( 'con ', 'y ')), 0, 1) graph_integer_masc = integer_part + NEMO_SPACE + maj_masc graph_integer_fem = shift_cardinal_gender(integer_part ) + NEMO_SPACE + maj_fem graph_integer = graph_integer_fem | graph_integer_masc graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE + pynini.union(optional_add_and + strip_cardinal_apocope( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE + pynini.union(optional_add_and + shift_cardinal_gender( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor = (graph_integer_with_minor_fem | graph_integer_with_minor_masc) graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc graph_decimal_fem = decimal.graph_fem graph_decimal_fem |= decimal.numbers_only_quantity graph_decimal_fem += NEMO_SPACE + maj_fem graph_decimal = graph_decimal_fem | graph_decimal_masc graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), 'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA ) @ graph_decimal graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc + delete_preserve_order) graph_minor_fem = shift_cardinal_gender(fractional_part ) + NEMO_SPACE + min_fem + delete_preserve_order graph_minor = graph_minor_fem | graph_minor_masc graph = (graph_integer | graph_integer_with_minor | graph_decimal | graph_minor) delete_tokens = self.delete_tokens(graph) self.fst = delete_tokens.optimize() <|reserved_special_token_1|> from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope from nemo_text_processing.text_normalization.es.utils import get_abs_path try: import pynini from pynini.lib import pynutil fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv') ) masc = pynini.string_file(get_abs_path( 'data/money/currency_plural_masc.tsv')) fem_singular = pynini.project(fem, 'input') masc_singular = pynini.project(masc, 'input') fem_plural = pynini.project(fem, 'output') masc_plural = pynini.project(masc, 'output') PYNINI_AVAILABLE = True except (ModuleNotFoundError, ImportError): fem_plural = None masc_plural = None fem_singular = None masc_singular = None PYNINI_AVAILABLE = False class MoneyFst(GraphFst): """ Finite state transducer for verbalizing money, e.g. 
money { currency_maj: "euro" integer_part: "un"} -> "un euro" money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques" money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique" Args: decimal: GraphFst deterministic: if True will provide a single transduction option, for False multiple transduction are generated (used for audio-based normalization) """ def __init__(self, decimal: GraphFst, deterministic: bool=True): super().__init__(name='money', kind='verbalize', deterministic= deterministic) maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') maj_masc = maj_plural_masc | maj_singular_masc maj_fem = maj_plural_fem | maj_singular_fem min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"') min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"') min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"') min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure( NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"') min_masc = min_plural_masc | min_singular_masc min_fem = min_plural_fem | min_singular_fem fractional_part = pynutil.delete('fractional_part: "' ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"') integer_part = pynutil.delete('integer_part: "') + pynini.closure( NEMO_NOT_QUOTE, 1) + pynutil.delete('"') optional_add_and = pynini.closure(pynutil.insert(pynini.union( 'con ', 'y ')), 0, 1) graph_integer_masc = integer_part + NEMO_SPACE + maj_masc graph_integer_fem = shift_cardinal_gender(integer_part ) + NEMO_SPACE + maj_fem graph_integer = graph_integer_fem | graph_integer_masc graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE + pynini.union(optional_add_and + strip_cardinal_apocope( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE + pynini.union(optional_add_and + shift_cardinal_gender( fractional_part), optional_add_and + fractional_part + NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order) graph_integer_with_minor = (graph_integer_with_minor_fem | graph_integer_with_minor_masc) graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc graph_decimal_fem = decimal.graph_fem graph_decimal_fem |= decimal.numbers_only_quantity graph_decimal_fem += NEMO_SPACE + maj_fem graph_decimal = graph_decimal_fem | graph_decimal_masc graph_decimal = 
pynini.cdrewrite(pynutil.insert(' de'), 'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA ) @ graph_decimal graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc + delete_preserve_order) graph_minor_fem = shift_cardinal_gender(fractional_part ) + NEMO_SPACE + min_fem + delete_preserve_order graph_minor = graph_minor_fem | graph_minor_masc graph = (graph_integer | graph_integer_with_minor | graph_decimal | graph_minor) delete_tokens = self.delete_tokens(graph) self.fst = delete_tokens.optimize() <|reserved_special_token_1|> # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nemo_text_processing.text_normalization.en.graph_utils import ( NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order, ) from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope from nemo_text_processing.text_normalization.es.utils import get_abs_path try: import pynini from pynini.lib import pynutil fem = pynini.string_file((get_abs_path("data/money/currency_plural_fem.tsv"))) masc = pynini.string_file((get_abs_path("data/money/currency_plural_masc.tsv"))) fem_singular = pynini.project(fem, "input") masc_singular = pynini.project(masc, "input") fem_plural = pynini.project(fem, "output") masc_plural = pynini.project(masc, "output") PYNINI_AVAILABLE = True except (ModuleNotFoundError, ImportError): fem_plural = None masc_plural = None fem_singular = None masc_singular = None PYNINI_AVAILABLE = False class MoneyFst(GraphFst): """ Finite state transducer for verbalizing money, e.g. 
money { currency_maj: "euro" integer_part: "un"} -> "un euro" money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta" money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques" money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique" Args: decimal: GraphFst deterministic: if True will provide a single transduction option, for False multiple transduction are generated (used for audio-based normalization) """ def __init__(self, decimal: GraphFst, deterministic: bool = True): super().__init__(name="money", kind="verbalize", deterministic=deterministic) maj_singular_masc = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular) + pynutil.delete("\"") ) maj_singular_fem = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular) + pynutil.delete("\"") ) maj_plural_masc = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural) + pynutil.delete("\"") ) maj_plural_fem = ( pynutil.delete("currency_maj: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural) + pynutil.delete("\"") ) maj_masc = maj_plural_masc | maj_singular_masc # Tagger kept quantity resolution stable maj_fem = maj_plural_fem | maj_singular_fem min_singular_masc = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular) + pynutil.delete("\"") ) min_singular_fem = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular) + pynutil.delete("\"") ) min_plural_masc = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural) + pynutil.delete("\"") ) min_plural_fem = ( pynutil.delete("currency_min: \"") + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural) + pynutil.delete("\"") ) min_masc = min_plural_masc | min_singular_masc min_fem = min_plural_fem | min_singular_fem fractional_part = ( pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"") ) integer_part = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"") optional_add_and = pynini.closure(pynutil.insert(pynini.union("con ", "y ")), 0, 1) # *** currency_maj graph_integer_masc = integer_part + NEMO_SPACE + maj_masc graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem graph_integer = graph_integer_fem | graph_integer_masc # *** currency_maj + (***) | ((con) *** current_min) graph_integer_with_minor_masc = ( graph_integer_masc + NEMO_SPACE + pynini.union( optional_add_and + strip_cardinal_apocope(fractional_part), (optional_add_and + fractional_part + NEMO_SPACE + min_masc), (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem), ) # Could be minor currency that is different gender + delete_preserve_order ) graph_integer_with_minor_fem = ( graph_integer_fem + NEMO_SPACE + pynini.union( optional_add_and + shift_cardinal_gender(fractional_part), (optional_add_and + fractional_part + NEMO_SPACE + min_masc), (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem), ) # Could be minor currency that is different gender + delete_preserve_order ) graph_integer_with_minor = graph_integer_with_minor_fem | 
graph_integer_with_minor_masc ## *** coma *** currency_maj graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc graph_decimal_fem = decimal.graph_fem graph_decimal_fem |= decimal.numbers_only_quantity # can still have "x billions" with fem currency graph_decimal_fem += NEMO_SPACE + maj_fem graph_decimal = graph_decimal_fem | graph_decimal_masc graph_decimal = ( pynini.cdrewrite( pynutil.insert(" de"), "quantity: \"" + pynini.closure(NEMO_NOT_QUOTE, 1), "\"", NEMO_SIGMA ) @ graph_decimal ) # formally it's millones/billones de *** # *** current_min graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order graph_minor = graph_minor_fem | graph_minor_masc graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor delete_tokens = self.delete_tokens(graph) self.fst = delete_tokens.optimize()
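As a standalone illustration of the quantity rewrite used above, the sketch below applies the same kind of pynini.cdrewrite insertion to a tagged string; the toy alphabet and the test input are illustrative assumptions for the sketch, not part of the NeMo grammar.

import pynini
from pynini.lib import pynutil, rewrite

# Toy byte alphabet large enough for the demo string (an assumption for this sketch)
sigma = pynini.union(*'abcdefghijklmnopqrstuvwxyz": ').closure()

# Insert " de" between "millones" and the closing quote of the quantity field
insert_de = pynini.cdrewrite(pynutil.insert(" de"), "millones", '"', sigma)

print(rewrite.top_rewrite('quantity: "dos millones"', insert_de))
# expected: quantity: "dos millones de"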
flexible
{ "blob_id": "dccdca65cce2959b07657636e23e7c9ab8a4f96c", "index": 1382, "step-1": "<mask token>\n\n\nclass MoneyFst(GraphFst):\n <mask token>\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = 
self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-2": "<mask token>\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n 
graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-3": "<mask token>\ntry:\n import pynini\n from pynini.lib import pynutil\n fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')\n )\n masc = pynini.string_file(get_abs_path(\n 'data/money/currency_plural_masc.tsv'))\n fem_singular = pynini.project(fem, 'input')\n masc_singular = pynini.project(masc, 'input')\n fem_plural = pynini.project(fem, 'output')\n masc_plural = pynini.project(masc, 'output')\n PYNINI_AVAILABLE = True\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n fem_singular = None\n masc_singular = None\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n 
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-4": "from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order\nfrom nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope\nfrom nemo_text_processing.text_normalization.es.utils import get_abs_path\ntry:\n import pynini\n from pynini.lib import pynutil\n fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')\n )\n masc = pynini.string_file(get_abs_path(\n 'data/money/currency_plural_masc.tsv'))\n fem_singular = pynini.project(fem, 'input')\n masc_singular = pynini.project(masc, 'input')\n fem_plural = pynini.project(fem, 'output')\n masc_plural = pynini.project(masc, 'output')\n PYNINI_AVAILABLE = True\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n fem_singular = None\n masc_singular = None\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno 
coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = 
pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-5": "# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom nemo_text_processing.text_normalization.en.graph_utils import (\n NEMO_NOT_QUOTE,\n NEMO_SIGMA,\n NEMO_SPACE,\n GraphFst,\n delete_preserve_order,\n)\nfrom nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope\nfrom nemo_text_processing.text_normalization.es.utils import get_abs_path\n\ntry:\n import pynini\n from pynini.lib import pynutil\n\n fem = pynini.string_file((get_abs_path(\"data/money/currency_plural_fem.tsv\")))\n masc = pynini.string_file((get_abs_path(\"data/money/currency_plural_masc.tsv\")))\n\n fem_singular = pynini.project(fem, \"input\")\n masc_singular = pynini.project(masc, \"input\")\n\n fem_plural = pynini.project(fem, \"output\")\n masc_plural = pynini.project(masc, \"output\")\n\n PYNINI_AVAILABLE = True\n\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n\n fem_singular = None\n masc_singular = None\n\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool = True):\n super().__init__(name=\"money\", kind=\"verbalize\", deterministic=deterministic)\n\n maj_singular_masc = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)\n + pynutil.delete(\"\\\"\")\n )\n maj_singular_fem = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)\n + pynutil.delete(\"\\\"\")\n )\n\n maj_plural_masc = (\n 
pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)\n + pynutil.delete(\"\\\"\")\n )\n maj_plural_fem = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)\n + pynutil.delete(\"\\\"\")\n )\n\n maj_masc = maj_plural_masc | maj_singular_masc # Tagger kept quantity resolution stable\n maj_fem = maj_plural_fem | maj_singular_fem\n\n min_singular_masc = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)\n + pynutil.delete(\"\\\"\")\n )\n min_singular_fem = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)\n + pynutil.delete(\"\\\"\")\n )\n\n min_plural_masc = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)\n + pynutil.delete(\"\\\"\")\n )\n min_plural_fem = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)\n + pynutil.delete(\"\\\"\")\n )\n\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n\n fractional_part = (\n pynutil.delete(\"fractional_part: \\\"\") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete(\"\\\"\")\n )\n\n integer_part = pynutil.delete(\"integer_part: \\\"\") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete(\"\\\"\")\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\"con \", \"y \")), 0, 1)\n\n # *** currency_maj\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem\n\n graph_integer = graph_integer_fem | graph_integer_masc\n\n # *** currency_maj + (***) | ((con) *** current_min)\n graph_integer_with_minor_masc = (\n graph_integer_masc\n + NEMO_SPACE\n + pynini.union(\n optional_add_and + strip_cardinal_apocope(fractional_part),\n (optional_add_and + fractional_part + NEMO_SPACE + min_masc),\n (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),\n ) # Could be minor currency that is different gender\n + delete_preserve_order\n )\n\n graph_integer_with_minor_fem = (\n graph_integer_fem\n + NEMO_SPACE\n + pynini.union(\n optional_add_and + shift_cardinal_gender(fractional_part),\n (optional_add_and + fractional_part + NEMO_SPACE + min_masc),\n (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),\n ) # Could be minor currency that is different gender\n + delete_preserve_order\n )\n\n graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc\n\n ## *** coma *** currency_maj\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity # can still have \"x billions\" with fem currency\n graph_decimal_fem += NEMO_SPACE + maj_fem\n\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = (\n pynini.cdrewrite(\n pynutil.insert(\" de\"), \"quantity: \\\"\" + pynini.closure(NEMO_NOT_QUOTE, 1), \"\\\"\", NEMO_SIGMA\n )\n @ graph_decimal\n ) # formally it's millones/billones de ***\n\n # *** current_min\n graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order\n graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order\n\n graph_minor = graph_minor_fem | graph_minor_masc\n\n graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor\n\n delete_tokens = 
self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def query_parse(GIVEN_QUERY): try: countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[ 'countries'])) except: countryIds_query = None try: days_query = GIVEN_QUERY['days'] except: days_query = None try: regions_query = GIVEN_QUERY['regions'] except: regions_query = [] try: regionDic_query = list(map(lambda x: {x['region_id']: x['day']}, regions_query)) except: regionDic_query = [] try: pois_query = GIVEN_QUERY['pois'] except: pois_query = [] try: regionNotGo_query = GIVEN_QUERY['regionNotGo'] except: regionNotGo_query = [] try: poiNotGo_query = GIVEN_QUERY['poiNotGo'] except: poiNotGo_query = [] try: regionSorted_query = GIVEN_QUERY['regionSorted'] except: regionSorted_query = [] try: availableMonths_query = GIVEN_QUERY['availableMonths'] except: availableMonths_query = [] try: price_query = GIVEN_QUERY['price'] except: price_query = None try: hotelRating_query = GIVEN_QUERY['hotelRating'] except: hotelRating_query = None try: arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId'] except: arrivalRegionId_query = None try: departRegionId_query = GIVEN_QUERY['departRegionId'] except: departRegionId_query = None connection = mysqlConnection() try: with connection.cursor() as cursor: if GIVEN_QUERY['countries']: if arrivalRegionId_query: sql = ( "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (arrivalRegionId_query, str(countryIds_query)[1:-1]) ) else: sql = ( "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]) else: sql = ( "SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null " ) cursor.execute(sql) startParts = cursor.fetchall() if GIVEN_QUERY['countries']: if departRegionId_query: sql = ( "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (departRegionId_query, str(countryIds_query)[1:-1])) else: sql = ( "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]) else: sql = ( "SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null " ) cursor.execute(sql) endParts = cursor.fetchall() finally: connection.close() startParts = [dict['id'] for dict in startParts] endParts = [dict['id'] for dict in endParts] return (countryIds_query, days_query, regions_query, regionDic_query, pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query, availableMonths_query, price_query, hotelRating_query, arrivalRegionId_query, departRegionId_query, 
startParts, endParts) <|reserved_special_token_1|> from mysqlConnection import mysqlConnection import yaml import copy import time import csv import json from collections import OrderedDict import ast def query_parse(GIVEN_QUERY): try: countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[ 'countries'])) except: countryIds_query = None try: days_query = GIVEN_QUERY['days'] except: days_query = None try: regions_query = GIVEN_QUERY['regions'] except: regions_query = [] try: regionDic_query = list(map(lambda x: {x['region_id']: x['day']}, regions_query)) except: regionDic_query = [] try: pois_query = GIVEN_QUERY['pois'] except: pois_query = [] try: regionNotGo_query = GIVEN_QUERY['regionNotGo'] except: regionNotGo_query = [] try: poiNotGo_query = GIVEN_QUERY['poiNotGo'] except: poiNotGo_query = [] try: regionSorted_query = GIVEN_QUERY['regionSorted'] except: regionSorted_query = [] try: availableMonths_query = GIVEN_QUERY['availableMonths'] except: availableMonths_query = [] try: price_query = GIVEN_QUERY['price'] except: price_query = None try: hotelRating_query = GIVEN_QUERY['hotelRating'] except: hotelRating_query = None try: arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId'] except: arrivalRegionId_query = None try: departRegionId_query = GIVEN_QUERY['departRegionId'] except: departRegionId_query = None connection = mysqlConnection() try: with connection.cursor() as cursor: if GIVEN_QUERY['countries']: if arrivalRegionId_query: sql = ( "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (arrivalRegionId_query, str(countryIds_query)[1:-1]) ) else: sql = ( "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]) else: sql = ( "SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null " ) cursor.execute(sql) startParts = cursor.fetchall() if GIVEN_QUERY['countries']: if departRegionId_query: sql = ( "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (departRegionId_query, str(countryIds_query)[1:-1])) else: sql = ( "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]) else: sql = ( "SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null " ) cursor.execute(sql) endParts = cursor.fetchall() finally: connection.close() startParts = [dict['id'] for dict in startParts] endParts = [dict['id'] for dict in endParts] return (countryIds_query, days_query, regions_query, regionDic_query, pois_query, regionNotGo_query, poiNotGo_query, 
regionSorted_query, availableMonths_query, price_query, hotelRating_query, arrivalRegionId_query, departRegionId_query, startParts, endParts) <|reserved_special_token_1|> #!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2017/6/20 下午4:00 # @Author : Huang HUi # @Site : # @File : query_parse.py # @Software: PyCharm from mysqlConnection import mysqlConnection import yaml import copy import time import csv import json from collections import OrderedDict import ast # # GIVEN_QUERY = {'days': [10,14], 'countries': [{'country_id': 28, 'day': None}], # 'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [], # 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [], # 'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None} # GIVEN_QUERY={'days': [10,13], 'countries': [{'country_id': 11, 'day': None}], 'regions': [{'region_id': 266, 'day': None}, # {'region_id': 220, 'day': None}], 'pois': [795, 800,878,1536]} # GIVEN_QUERY={'days': [12], 'countries': [{'country_id': 28, 'day': None}], # 'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}], # 'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]} def query_parse(GIVEN_QUERY): try: countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY['countries'])) except : countryIds_query=None try: days_query=GIVEN_QUERY['days'] except : days_query=None try: regions_query = GIVEN_QUERY['regions'] except : regions_query=[] try: regionDic_query = list(map(lambda x: {x['region_id']: x['day']}, regions_query)) except : regionDic_query=[] try: pois_query=GIVEN_QUERY['pois'] except : pois_query=[] try: regionNotGo_query=GIVEN_QUERY['regionNotGo'] except : regionNotGo_query=[] try: poiNotGo_query=GIVEN_QUERY['poiNotGo'] except : poiNotGo_query=[] try: regionSorted_query=GIVEN_QUERY['regionSorted'] except : regionSorted_query=[] try: availableMonths_query=GIVEN_QUERY['availableMonths'] except : availableMonths_query=[] try: price_query=GIVEN_QUERY['price'] except : price_query=None try: hotelRating_query=GIVEN_QUERY['hotelRating'] except : hotelRating_query=None try: arrivalRegionId_query=GIVEN_QUERY['arrivalRegionId'] except : arrivalRegionId_query=None try: departRegionId_query=GIVEN_QUERY['departRegionId'] except: departRegionId_query=None connection=mysqlConnection() try: with connection.cursor() as cursor: if GIVEN_QUERY['countries']: # country condition if arrivalRegionId_query: sql = "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (arrivalRegionId_query,str(countryIds_query)[1:-1]) else: sql = "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1] else: # all sql = "SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null " cursor.execute(sql) startParts = cursor.fetchall() if GIVEN_QUERY['countries']: if departRegionId_query: sql = "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join 
regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (departRegionId_query, str(countryIds_query)[1:-1]) else: sql = "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1] else: sql = "SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null " cursor.execute(sql) endParts = cursor.fetchall() finally: connection.close() startParts = [dict['id'] for dict in startParts] endParts = [dict['id'] for dict in endParts] return countryIds_query, days_query, regions_query, regionDic_query, \ pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query, availableMonths_query, price_query, \ hotelRating_query, arrivalRegionId_query, departRegionId_query,startParts,endParts
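A minimal usage sketch for the function above; the query values are hypothetical (drawn from the commented-out example queries in the file header) and it assumes mysqlConnection() can actually reach the tidy_parts/regions tables.

if __name__ == '__main__':
    sample_query = {'days': [10, 14],
                    'countries': [{'country_id': 28, 'day': None}],
                    'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}],
                    'pois': [], 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [],
                    'availableMonths': [], 'price': [5000, 15000], 'hotelRating': None,
                    'arrivalRegionId': 27, 'departRegionId': None}
    parsed = query_parse(sample_query)
    # the last two elements of the returned tuple are the candidate start/end parts
    startParts, endParts = parsed[-2], parsed[-1]
    print("%d candidate start parts, %d candidate end parts" % (len(startParts), len(endParts)))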
flexible
{ "blob_id": "b52807a15cef8f07f685f8761a470d4a24d9c3dc", "index": 6603, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef query_parse(GIVEN_QUERY):\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[\n 'countries']))\n except:\n countryIds_query = None\n try:\n days_query = GIVEN_QUERY['days']\n except:\n days_query = None\n try:\n regions_query = GIVEN_QUERY['regions']\n except:\n regions_query = []\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']},\n regions_query))\n except:\n regionDic_query = []\n try:\n pois_query = GIVEN_QUERY['pois']\n except:\n pois_query = []\n try:\n regionNotGo_query = GIVEN_QUERY['regionNotGo']\n except:\n regionNotGo_query = []\n try:\n poiNotGo_query = GIVEN_QUERY['poiNotGo']\n except:\n poiNotGo_query = []\n try:\n regionSorted_query = GIVEN_QUERY['regionSorted']\n except:\n regionSorted_query = []\n try:\n availableMonths_query = GIVEN_QUERY['availableMonths']\n except:\n availableMonths_query = []\n try:\n price_query = GIVEN_QUERY['price']\n except:\n price_query = None\n try:\n hotelRating_query = GIVEN_QUERY['hotelRating']\n except:\n hotelRating_query = None\n try:\n arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId']\n except:\n arrivalRegionId_query = None\n try:\n departRegionId_query = GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query = None\n connection = mysqlConnection()\n try:\n with connection.cursor() as cursor:\n if GIVEN_QUERY['countries']:\n if arrivalRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (arrivalRegionId_query, str(countryIds_query)[1:-1])\n )\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (departRegionId_query, str(countryIds_query)[1:-1]))\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n endParts = cursor.fetchall()\n finally:\n connection.close()\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n return 
(countryIds_query, days_query, regions_query, regionDic_query,\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,\n availableMonths_query, price_query, hotelRating_query,\n arrivalRegionId_query, departRegionId_query, startParts, endParts)\n", "step-3": "from mysqlConnection import mysqlConnection\nimport yaml\nimport copy\nimport time\nimport csv\nimport json\nfrom collections import OrderedDict\nimport ast\n\n\ndef query_parse(GIVEN_QUERY):\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[\n 'countries']))\n except:\n countryIds_query = None\n try:\n days_query = GIVEN_QUERY['days']\n except:\n days_query = None\n try:\n regions_query = GIVEN_QUERY['regions']\n except:\n regions_query = []\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']},\n regions_query))\n except:\n regionDic_query = []\n try:\n pois_query = GIVEN_QUERY['pois']\n except:\n pois_query = []\n try:\n regionNotGo_query = GIVEN_QUERY['regionNotGo']\n except:\n regionNotGo_query = []\n try:\n poiNotGo_query = GIVEN_QUERY['poiNotGo']\n except:\n poiNotGo_query = []\n try:\n regionSorted_query = GIVEN_QUERY['regionSorted']\n except:\n regionSorted_query = []\n try:\n availableMonths_query = GIVEN_QUERY['availableMonths']\n except:\n availableMonths_query = []\n try:\n price_query = GIVEN_QUERY['price']\n except:\n price_query = None\n try:\n hotelRating_query = GIVEN_QUERY['hotelRating']\n except:\n hotelRating_query = None\n try:\n arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId']\n except:\n arrivalRegionId_query = None\n try:\n departRegionId_query = GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query = None\n connection = mysqlConnection()\n try:\n with connection.cursor() as cursor:\n if GIVEN_QUERY['countries']:\n if arrivalRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (arrivalRegionId_query, str(countryIds_query)[1:-1])\n )\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (departRegionId_query, str(countryIds_query)[1:-1]))\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end 
= 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n endParts = cursor.fetchall()\n finally:\n connection.close()\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n return (countryIds_query, days_query, regions_query, regionDic_query,\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,\n availableMonths_query, price_query, hotelRating_query,\n arrivalRegionId_query, departRegionId_query, startParts, endParts)\n", "step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/20 下午4:00\n# @Author : Huang HUi\n# @Site : \n# @File : query_parse.py\n# @Software: PyCharm\nfrom mysqlConnection import mysqlConnection\nimport yaml\nimport copy\nimport time\nimport csv\nimport json\nfrom collections import OrderedDict\nimport ast\n\n#\n# GIVEN_QUERY = {'days': [10,14], 'countries': [{'country_id': 28, 'day': None}],\n# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [],\n# 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [],\n# 'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None}\n\n# GIVEN_QUERY={'days': [10,13], 'countries': [{'country_id': 11, 'day': None}], 'regions': [{'region_id': 266, 'day': None},\n# {'region_id': 220, 'day': None}], 'pois': [795, 800,878,1536]}\n\n# GIVEN_QUERY={'days': [12], 'countries': [{'country_id': 28, 'day': None}],\n# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}],\n# 'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]}\n\ndef query_parse(GIVEN_QUERY):\n\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY['countries']))\n except :\n countryIds_query=None\n try:\n days_query=GIVEN_QUERY['days']\n except :\n days_query=None\n try:\n regions_query = GIVEN_QUERY['regions']\n except :\n regions_query=[]\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']}, regions_query))\n except :\n regionDic_query=[]\n try:\n pois_query=GIVEN_QUERY['pois']\n except :\n pois_query=[]\n try:\n regionNotGo_query=GIVEN_QUERY['regionNotGo']\n except :\n regionNotGo_query=[]\n try:\n poiNotGo_query=GIVEN_QUERY['poiNotGo']\n except :\n poiNotGo_query=[]\n try:\n regionSorted_query=GIVEN_QUERY['regionSorted']\n except :\n regionSorted_query=[]\n try:\n availableMonths_query=GIVEN_QUERY['availableMonths']\n except :\n availableMonths_query=[]\n try:\n price_query=GIVEN_QUERY['price']\n except :\n price_query=None\n try:\n hotelRating_query=GIVEN_QUERY['hotelRating']\n except :\n hotelRating_query=None\n try:\n arrivalRegionId_query=GIVEN_QUERY['arrivalRegionId']\n except :\n arrivalRegionId_query=None\n try:\n departRegionId_query=GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query=None\n\n\n connection=mysqlConnection()\n try:\n with connection.cursor() as cursor:\n\n if GIVEN_QUERY['countries']:\n # country condition\n if arrivalRegionId_query:\n sql = \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\" % (arrivalRegionId_query,str(countryIds_query)[1:-1])\n else:\n sql = \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on 
tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\" % str(countryIds_query)[1:-1]\n else:\n # all\n sql = \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\" % (departRegionId_query, str(countryIds_query)[1:-1])\n else:\n sql = \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\" % str(countryIds_query)[1:-1]\n else:\n sql = \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n cursor.execute(sql)\n endParts = cursor.fetchall()\n\n\n\n\n finally:\n connection.close()\n\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n\n\n return countryIds_query, days_query, regions_query, regionDic_query, \\\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query, availableMonths_query, price_query, \\\n hotelRating_query, arrivalRegionId_query, departRegionId_query,startParts,endParts\n\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph
from reportlab.lib.styles import getSampleStyleSheet


def paragraph_spacing():
    doc = SimpleDocTemplate("paragraph_spacing.pdf", pagesize=letter)

    styles = getSampleStyleSheet()
    # Add spacing between paragraphs so explicit <br/> tags are not needed
    styles["Normal"].spaceBefore = 10
    styles["Normal"].spaceAfter = 10

    flowables = []

    text = """
    This <b>text</b> is important,
    not <strong>strong</strong>.
    """
    para = Paragraph(text, style=styles["Normal"])
    flowables.append(para)

    text = """
    This <b>text</b> is important,
    not <strong>strong</strong>.
    """
    para = Paragraph(text, style=styles["Normal"])
    flowables.append(para)

    doc.build(flowables)


if __name__ == "__main__":
    paragraph_spacing()
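A small variation on the same idea, kept as a sketch: deriving a dedicated ParagraphStyle (the name 'NormalSpaced' is an arbitrary choice) avoids mutating the shared sample stylesheet; it would replace the spacing lines inside paragraph_spacing(), where styles and text are already defined.

from reportlab.lib.styles import ParagraphStyle

# Derived style; the global "Normal" entry stays untouched
spaced = ParagraphStyle('NormalSpaced', parent=styles['Normal'],
                        spaceBefore=10, spaceAfter=10)
para = Paragraph(text, style=spaced)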
normal
{ "blob_id": "d79e65b7aa09066230dec1a472f4535dff4123b5", "index": 4217, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\nif __name__ == '__main__':\n paragraph_spacing()\n", "step-4": "from reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate('paragraph_spacing.pdf', pagesize=letter)\n styles = getSampleStyleSheet()\n styles['Normal'].spaceBefore = 10\n styles['Normal'].spaceAfter = 10\n flowables = []\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles['Normal'])\n flowables.append(para)\n doc.build(flowables)\n\n\nif __name__ == '__main__':\n paragraph_spacing()\n", "step-5": "from reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph\nfrom reportlab.lib.styles import getSampleStyleSheet\n\n\ndef paragraph_spacing():\n doc = SimpleDocTemplate(\"paragraph_spacing.pdf\", pagesize=letter)\n\n styles = getSampleStyleSheet()\n #Mengahasilkan spasi antar paragraf sehinga tidak diperlukan <br/>\n styles[\"Normal\"].spaceBefore = 10\n styles[\"Normal\"].spaceAfter = 10\n\n flowables = []\n\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles[\"Normal\"])\n flowables.append(para)\n\n text = \"\"\"\n This <b>text</b> is important,\n not <strong>strong</strong>.\n \"\"\"\n para = Paragraph(text, style=styles[\"Normal\"])\n flowables.append(para)\n\n doc.build(flowables)\n\n\nif __name__ == \"__main__\":\n paragraph_spacing()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(img.shape)
print(img[257][400])
<|reserved_special_token_0|>
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img = cv2.imread('assets/logo.jpg', -1)
print(img.shape)
print(img[257][400])
<|reserved_special_token_0|>
tag = img[500:700, 600:900]
img[100:300, 650:950] = tag
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import random
import cv2
img = cv2.imread('assets/logo.jpg', -1)
print(img.shape)
print(img[257][400])
<|reserved_special_token_0|>
tag = img[500:700, 600:900]
img[100:300, 650:950] = tag
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import random
import cv2

img = cv2.imread('assets/logo.jpg', -1)
print(img.shape) #3 channels, bgr

#look at row 257, pixel 400 --> has bgr values: [41 98 243]
print(img[257][400])

'''
# manipulate the first 100 rows, all columns, and randomize the 3 pixel values
# (rows, columns, pixels) where pixels: b,g,r
for i in range(100): #first 100 rows
    for j in range(img.shape[1]): #all the columns
        img[i][j] = [random.randint(0,255),random.randint(0,255),random.randint(0,255)]

cv2.imshow('modifiedImage', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''

#copy one part of the image and paste it somewhere else
#take the pixels from rows 500 to 700 and, of those, columns 600 to 900
tag = img[500:700, 600:900] #part of the picture

#paste this on another location in the image; needs the same dimension/size
img[100:300, 650:950] = tag

cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
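One thing worth noting about the slice assignment above: it only works when the destination and source regions have exactly the same shape, so a quick check (sketch below, using the same slices) turns a confusing broadcasting ValueError into an explicit message.

# Sketch: verify the destination slice matches the copied region before assigning
dst_shape = img[100:300, 650:950].shape
assert dst_shape == tag.shape, "destination %s must match source %s" % (dst_shape, tag.shape)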
flexible
{ "blob_id": "35e66e5e154f5cd70f187a1cde33cef71102e1a6", "index": 6829, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(img.shape)\nprint(img[257][400])\n<mask token>\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-3": "<mask token>\nimg = cv2.imread('assets/logo.jpg', -1)\nprint(img.shape)\nprint(img[257][400])\n<mask token>\ntag = img[500:700, 600:900]\nimg[100:300, 650:950] = tag\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-4": "import random\nimport cv2\nimg = cv2.imread('assets/logo.jpg', -1)\nprint(img.shape)\nprint(img[257][400])\n<mask token>\ntag = img[500:700, 600:900]\nimg[100:300, 650:950] = tag\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "step-5": "import random\nimport cv2\n\nimg = cv2.imread('assets/logo.jpg', -1)\nprint(img.shape) #3 channels, bgr\n\n#look at the 257. row and pixel 400 --> has bgr values: [41 98 243]\nprint(img[257][400])\n\n'''\n# manipulate the first 100 rows, all columns, and randomize the 3 pixel values\n# (rows, colums, pixels) where pixels: b,g,r\nfor i in range(100): #first 100 rows\n for j in range(img.shape[1]): #all the colums\n img[i][j] = [random.randint(0,255),random.randint(0,255),random.randint(0,255)]\n\ncv2.imshow('modifiedImage', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n'''\n\n#copy one part of the image and copy it somewhere else\n#take the pixels from row 500 bis 700 und davon die colums 600:900\ntag = img[500:700, 600:900] #part of the picture\n\n#paste this on another location in the image; needs same dimeension/ size\nimg[100:300, 650:950] = tag\n\ncv2.imshow('Image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
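The record above demonstrates OpenCV/NumPy region slicing: a 200x300 block is read out of one location in the image and written into another slice of identical shape. A minimal sketch of that copy-a-region idiom on a synthetic image, so it runs without the original assets/logo.jpg; the image size and output filename are illustrative.

import cv2
import numpy as np

# Synthetic 800x1000 BGR image standing in for assets/logo.jpg.
img = np.zeros((800, 1000, 3), dtype=np.uint8)
img[500:700, 600:900] = (0, 0, 255)      # mark the source region in red

tag = img[500:700, 600:900].copy()       # .copy() avoids aliasing the source pixels
assert tag.shape == (200, 300, 3)

img[100:300, 650:950] = tag              # destination slice must match tag.shape
cv2.imwrite("roi_copy_demo.png", img)    # write to disk instead of cv2.imshow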
import pylab,numpy as np from numpy import sin from matplotlib.patches import FancyArrowPatch fig=pylab.figure() w=1 h=1 th=3.14159/25. x=np.r_[0,0,w,w,0] y=np.r_[0,h,h-w*sin(th),0-w*sin(th),0] pylab.plot(x,y) x=np.r_[0,0,w/2.0,w/2.0,0] y=np.r_[0,h/6.0,h/6.0-w/2.0*sin(th),0-w/2.0*sin(th),0] pylab.plot(x,y,'--') pylab.text(w/4.0,h/12.0-w/4.0*sin(th)-h/30.,'$A_{a,subcool}$',ha='center',va='center') h0=h-w/2.0*sin(th)-h/6.0 x=np.r_[w/2.0,w/2.0,w,w,w/2.0] y=np.r_[0+h0,h/6.0+h0,h/6.0-w/2.0*sin(th)+h0,0-w/2.0*sin(th)+h0,0+h0] pylab.plot(x,y,'--') pylab.text(0.75*w,h-h/12.0-0.75*w*sin(th)-h/30.,'$A_{a,superheat}$',ha='center',va='center') pylab.text(0.5*w,h/2.0-0.5*w*sin(th),'$A_{a,two-phase}$',ha='center',va='center') ##Add the circuits for y0 in [h/12.,h/12.+h/6.,h/12.+2*h/6.,h/12.+3*h/6.,h/12.+4*h/6.,h/12.+5*h/6.]: pylab.plot(np.r_[0,w],np.r_[y0,y0-w*sin(th)],'k',lw=4) pylab.gca().add_patch(FancyArrowPatch((w+w/10.,h-h/12.0-(w+w/10.)*sin(th)),(w,h-h/12.0-w*sin(th)),arrowstyle='-|>',fc='k',ec='k',mutation_scale=20,lw=0.8)) pylab.gca().add_patch(FancyArrowPatch((0,h/12.0),(-w/10.,h/12.0-(-w/10.)*sin(th)),arrowstyle='-|>',fc='k',ec='k',mutation_scale=20,lw=0.8)) pylab.gca().axis('equal') pylab.gca().axis('off') pylab.show()
normal
{ "blob_id": "c485466a736fa0a4f183092e561a27005c01316d", "index": 8616, "step-1": "<mask token>\n", "step-2": "<mask token>\npylab.plot(x, y)\n<mask token>\npylab.plot(x, y, '--')\npylab.text(w / 4.0, h / 12.0 - w / 4.0 * sin(th) - h / 30.0,\n '$A_{a,subcool}$', ha='center', va='center')\n<mask token>\npylab.plot(x, y, '--')\npylab.text(0.75 * w, h - h / 12.0 - 0.75 * w * sin(th) - h / 30.0,\n '$A_{a,superheat}$', ha='center', va='center')\npylab.text(0.5 * w, h / 2.0 - 0.5 * w * sin(th), '$A_{a,two-phase}$', ha=\n 'center', va='center')\nfor y0 in [h / 12.0, h / 12.0 + h / 6.0, h / 12.0 + 2 * h / 6.0, h / 12.0 +\n 3 * h / 6.0, h / 12.0 + 4 * h / 6.0, h / 12.0 + 5 * h / 6.0]:\n pylab.plot(np.r_[0, w], np.r_[y0, y0 - w * sin(th)], 'k', lw=4)\npylab.gca().add_patch(FancyArrowPatch((w + w / 10.0, h - h / 12.0 - (w + w /\n 10.0) * sin(th)), (w, h - h / 12.0 - w * sin(th)), arrowstyle='-|>', fc\n ='k', ec='k', mutation_scale=20, lw=0.8))\npylab.gca().add_patch(FancyArrowPatch((0, h / 12.0), (-w / 10.0, h / 12.0 -\n -w / 10.0 * sin(th)), arrowstyle='-|>', fc='k', ec='k', mutation_scale=\n 20, lw=0.8))\npylab.gca().axis('equal')\npylab.gca().axis('off')\npylab.show()\n", "step-3": "<mask token>\nfig = pylab.figure()\nw = 1\nh = 1\nth = 3.14159 / 25.0\nx = np.r_[0, 0, w, w, 0]\ny = np.r_[0, h, h - w * sin(th), 0 - w * sin(th), 0]\npylab.plot(x, y)\nx = np.r_[0, 0, w / 2.0, w / 2.0, 0]\ny = np.r_[0, h / 6.0, h / 6.0 - w / 2.0 * sin(th), 0 - w / 2.0 * sin(th), 0]\npylab.plot(x, y, '--')\npylab.text(w / 4.0, h / 12.0 - w / 4.0 * sin(th) - h / 30.0,\n '$A_{a,subcool}$', ha='center', va='center')\nh0 = h - w / 2.0 * sin(th) - h / 6.0\nx = np.r_[w / 2.0, w / 2.0, w, w, w / 2.0]\ny = np.r_[0 + h0, h / 6.0 + h0, h / 6.0 - w / 2.0 * sin(th) + h0, 0 - w / \n 2.0 * sin(th) + h0, 0 + h0]\npylab.plot(x, y, '--')\npylab.text(0.75 * w, h - h / 12.0 - 0.75 * w * sin(th) - h / 30.0,\n '$A_{a,superheat}$', ha='center', va='center')\npylab.text(0.5 * w, h / 2.0 - 0.5 * w * sin(th), '$A_{a,two-phase}$', ha=\n 'center', va='center')\nfor y0 in [h / 12.0, h / 12.0 + h / 6.0, h / 12.0 + 2 * h / 6.0, h / 12.0 +\n 3 * h / 6.0, h / 12.0 + 4 * h / 6.0, h / 12.0 + 5 * h / 6.0]:\n pylab.plot(np.r_[0, w], np.r_[y0, y0 - w * sin(th)], 'k', lw=4)\npylab.gca().add_patch(FancyArrowPatch((w + w / 10.0, h - h / 12.0 - (w + w /\n 10.0) * sin(th)), (w, h - h / 12.0 - w * sin(th)), arrowstyle='-|>', fc\n ='k', ec='k', mutation_scale=20, lw=0.8))\npylab.gca().add_patch(FancyArrowPatch((0, h / 12.0), (-w / 10.0, h / 12.0 -\n -w / 10.0 * sin(th)), arrowstyle='-|>', fc='k', ec='k', mutation_scale=\n 20, lw=0.8))\npylab.gca().axis('equal')\npylab.gca().axis('off')\npylab.show()\n", "step-4": "import pylab, numpy as np\nfrom numpy import sin\nfrom matplotlib.patches import FancyArrowPatch\nfig = pylab.figure()\nw = 1\nh = 1\nth = 3.14159 / 25.0\nx = np.r_[0, 0, w, w, 0]\ny = np.r_[0, h, h - w * sin(th), 0 - w * sin(th), 0]\npylab.plot(x, y)\nx = np.r_[0, 0, w / 2.0, w / 2.0, 0]\ny = np.r_[0, h / 6.0, h / 6.0 - w / 2.0 * sin(th), 0 - w / 2.0 * sin(th), 0]\npylab.plot(x, y, '--')\npylab.text(w / 4.0, h / 12.0 - w / 4.0 * sin(th) - h / 30.0,\n '$A_{a,subcool}$', ha='center', va='center')\nh0 = h - w / 2.0 * sin(th) - h / 6.0\nx = np.r_[w / 2.0, w / 2.0, w, w, w / 2.0]\ny = np.r_[0 + h0, h / 6.0 + h0, h / 6.0 - w / 2.0 * sin(th) + h0, 0 - w / \n 2.0 * sin(th) + h0, 0 + h0]\npylab.plot(x, y, '--')\npylab.text(0.75 * w, h - h / 12.0 - 0.75 * w * sin(th) - h / 30.0,\n '$A_{a,superheat}$', ha='center', va='center')\npylab.text(0.5 * w, h / 2.0 - 0.5 
* w * sin(th), '$A_{a,two-phase}$', ha=\n 'center', va='center')\nfor y0 in [h / 12.0, h / 12.0 + h / 6.0, h / 12.0 + 2 * h / 6.0, h / 12.0 +\n 3 * h / 6.0, h / 12.0 + 4 * h / 6.0, h / 12.0 + 5 * h / 6.0]:\n pylab.plot(np.r_[0, w], np.r_[y0, y0 - w * sin(th)], 'k', lw=4)\npylab.gca().add_patch(FancyArrowPatch((w + w / 10.0, h - h / 12.0 - (w + w /\n 10.0) * sin(th)), (w, h - h / 12.0 - w * sin(th)), arrowstyle='-|>', fc\n ='k', ec='k', mutation_scale=20, lw=0.8))\npylab.gca().add_patch(FancyArrowPatch((0, h / 12.0), (-w / 10.0, h / 12.0 -\n -w / 10.0 * sin(th)), arrowstyle='-|>', fc='k', ec='k', mutation_scale=\n 20, lw=0.8))\npylab.gca().axis('equal')\npylab.gca().axis('off')\npylab.show()\n", "step-5": "import pylab,numpy as np\r\nfrom numpy import sin\r\nfrom matplotlib.patches import FancyArrowPatch\r\n\r\nfig=pylab.figure()\r\nw=1\r\nh=1\r\nth=3.14159/25.\r\nx=np.r_[0,0,w,w,0]\r\ny=np.r_[0,h,h-w*sin(th),0-w*sin(th),0]\r\npylab.plot(x,y)\r\n\r\nx=np.r_[0,0,w/2.0,w/2.0,0]\r\ny=np.r_[0,h/6.0,h/6.0-w/2.0*sin(th),0-w/2.0*sin(th),0]\r\npylab.plot(x,y,'--')\r\npylab.text(w/4.0,h/12.0-w/4.0*sin(th)-h/30.,'$A_{a,subcool}$',ha='center',va='center')\r\n\r\nh0=h-w/2.0*sin(th)-h/6.0\r\nx=np.r_[w/2.0,w/2.0,w,w,w/2.0]\r\ny=np.r_[0+h0,h/6.0+h0,h/6.0-w/2.0*sin(th)+h0,0-w/2.0*sin(th)+h0,0+h0]\r\npylab.plot(x,y,'--')\r\npylab.text(0.75*w,h-h/12.0-0.75*w*sin(th)-h/30.,'$A_{a,superheat}$',ha='center',va='center')\r\n\r\npylab.text(0.5*w,h/2.0-0.5*w*sin(th),'$A_{a,two-phase}$',ha='center',va='center')\r\n\r\n##Add the circuits\r\nfor y0 in [h/12.,h/12.+h/6.,h/12.+2*h/6.,h/12.+3*h/6.,h/12.+4*h/6.,h/12.+5*h/6.]:\r\n pylab.plot(np.r_[0,w],np.r_[y0,y0-w*sin(th)],'k',lw=4)\r\n \r\npylab.gca().add_patch(FancyArrowPatch((w+w/10.,h-h/12.0-(w+w/10.)*sin(th)),(w,h-h/12.0-w*sin(th)),arrowstyle='-|>',fc='k',ec='k',mutation_scale=20,lw=0.8))\r\npylab.gca().add_patch(FancyArrowPatch((0,h/12.0),(-w/10.,h/12.0-(-w/10.)*sin(th)),arrowstyle='-|>',fc='k',ec='k',mutation_scale=20,lw=0.8))\r\n \r\npylab.gca().axis('equal')\r\npylab.gca().axis('off')\r\npylab.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
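The record above draws a heat-exchanger-style schematic with pylab, labelled subcool/two-phase/superheat regions, and FancyArrowPatch inlet/outlet arrows. A reduced sketch of the FancyArrowPatch call itself, rendered off-screen so it runs headless; the output filename arrow_demo.png is illustrative.

import matplotlib
matplotlib.use("Agg")                          # no display needed
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch

fig, ax = plt.subplots()
ax.plot([0, 1, 1, 0, 0], [0, 0, 1, 1, 0])      # a unit square as the outline
# One arrow from (1.1, 0.5) to (1.0, 0.5), using the same style flags as the record.
ax.add_patch(FancyArrowPatch((1.1, 0.5), (1.0, 0.5), arrowstyle='-|>',
                             fc='k', ec='k', mutation_scale=20, lw=0.8))
ax.axis('equal')
ax.axis('off')
fig.savefig("arrow_demo.png")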
""" This module takes care of starting the API Server, Loading the DB and Adding the endpoints """ import os from flask import Flask, request, jsonify, url_for from flask_migrate import Migrate from flask_swagger import swagger from flask_cors import CORS from flask_jwt_extended import ( JWTManager, jwt_required, create_access_token, create_refresh_token, get_jwt_identity* ) from utils import APIException, generate_sitemap from models import db from models import User from passlib.hash import pbkdf2_sha256 as sha256 app = Flask(__name__) app.url_map.strict_slashes = False app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY') app.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600 MIGRATE = Migrate(app, db) db.init_app(app) CORS(app) jwt = JWTManager(app)* # Handle/serialize errors like a JSON object @app.errorhandler(APIException) def handle_invalid_usage(error): return jsonify(error.to_dict()), error.status_code # generate sitemap with all your endpoints @app.route('/') def sitemap(): return generate_sitemap(app) @app.route('/hello', methods=['POST', 'GET']) @jwt_required def handle_hello(): current_user = get_jwt_identity() response_body = { "hello": current_user } return jsonify(response_body), 200 @app.route('/login', methods=['POST'])* def handle_login(): data = request.json user = User.query.filter_by(username = data["username"]).first() if user is None: return jsonify ({ "error": "el usuario no existe" }), 404 if sha256.verify(data["password"], user.password): mivariable = create_access_token(identity=data["username"]) refresh = create_refresh_token(identity=data["username"]) return jsonify ({ "token": mivariable, "refresh": refresh }), 200 return jsonify ({ "error":"la contraseña no es valida" }), 404 @app.route('/register', methods=['POST'])* def handle_register(): data = request.json user = User() user.username = data["username"] user.mail = data["mail"] user.password = sha256.hash(data["password"]) db.session.add(user) db.session.commit() return jsonify(user.serialize()), 200 # this only runs if `$ python src/main.py` is executed if __name__ == '__main__': PORT = int(os.environ.get('PORT', 3000)) app.run(host='0.0.0.0', port=PORT, debug=False)
normal
{ "blob_id": "36d596c1019dbaaf8dc394633ca464421517dc21", "index": 3381, "step-1": "\"\"\"\nThis module takes care of starting the API Server, Loading the DB and Adding the endpoints\n\"\"\"\nimport os\nfrom flask import Flask, request, jsonify, url_for\nfrom flask_migrate import Migrate\nfrom flask_swagger import swagger\nfrom flask_cors import CORS\nfrom flask_jwt_extended import (\n JWTManager, jwt_required, create_access_token, create_refresh_token,\n get_jwt_identity*\n)\nfrom utils import APIException, generate_sitemap\nfrom models import db\nfrom models import User\n\n\nfrom passlib.hash import pbkdf2_sha256 as sha256\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\napp.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY')\napp.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600\n\nMIGRATE = Migrate(app, db)\ndb.init_app(app)\nCORS(app)\n\njwt = JWTManager(app)*\n\n\n# Handle/serialize errors like a JSON object\[email protected](APIException)\ndef handle_invalid_usage(error):\n return jsonify(error.to_dict()), error.status_code\n\n# generate sitemap with all your endpoints\[email protected]('/')\ndef sitemap():\n return generate_sitemap(app)\n\[email protected]('/hello', methods=['POST', 'GET'])\n@jwt_required\ndef handle_hello():\n current_user = get_jwt_identity()\n response_body = {\n \"hello\": current_user\n }\n\n return jsonify(response_body), 200\n\[email protected]('/login', methods=['POST'])*\ndef handle_login():\n data = request.json\n user = User.query.filter_by(username = data[\"username\"]).first()\n if user is None:\n return jsonify ({\n \"error\": \"el usuario no existe\"\n }), 404\n if sha256.verify(data[\"password\"], user.password):\n\n mivariable = create_access_token(identity=data[\"username\"])\n refresh = create_refresh_token(identity=data[\"username\"])\n return jsonify ({\n \"token\": mivariable,\n \"refresh\": refresh\n }), 200\n\n return jsonify ({\n \"error\":\"la contraseña no es valida\"\n }), 404\n\[email protected]('/register', methods=['POST'])*\ndef handle_register():\n data = request.json\n\n user = User()\n user.username = data[\"username\"]\n user.mail = data[\"mail\"]\n user.password = sha256.hash(data[\"password\"])\n\n db.session.add(user)\n db.session.commit()\n\n return jsonify(user.serialize()), 200\n\n# this only runs if `$ python src/main.py` is executed\nif __name__ == '__main__':\n PORT = int(os.environ.get('PORT', 3000))\n app.run(host='0.0.0.0', port=PORT, debug=False)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
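The record above is a Flask + flask_jwt_extended API with /login, /register and a JWT-protected /hello route. A sketch of how a client could exercise those routes, assuming the server from the record is running locally on port 3000; the credentials below are placeholders, not values from the record.

import requests

BASE = "http://localhost:3000"

# Obtain an access token from /login.
resp = requests.post(f"{BASE}/login",
                     json={"username": "demo", "password": "demo-pass"})
resp.raise_for_status()
token = resp.json()["token"]

# Call the protected route with the default Bearer authorization header.
hello = requests.get(f"{BASE}/hello",
                     headers={"Authorization": f"Bearer {token}"})
print(hello.status_code, hello.json())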
<|reserved_special_token_0|> def remove_posts(data, index_list): data = data.drop(index_list) return data.reset_index(drop=True) <|reserved_special_token_0|> def preprocess(text): text = text.lower() text = text.replace('$', ' ') text = text.replace('-', ' ') text = text.replace('/', ' ') text = text.replace('.', ' ') text = word_tokenize(text) text = [word for word in text if word not in stop_words] text = [word for word in text if word.isalpha()] return text <|reserved_special_token_0|> def count_keywords(comments, keywords): return {word: comments.count(word) for word in keywords if comments. count(word) > 0} <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def remove_posts(data, index_list): data = data.drop(index_list) return data.reset_index(drop=True) <|reserved_special_token_0|> for index, thread in enumerate(comments): aggregate = [] for comment in thread: if type(comment['comment_reply']) == str: aggregate.append(comment['comment_reply'].lower()) comments_in_thread.append(aggregate) <|reserved_special_token_0|> assert len(titles) == len(content) assert len(comments) == len(content) <|reserved_special_token_0|> stop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks', 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even', 'could', 'also']) def preprocess(text): text = text.lower() text = text.replace('$', ' ') text = text.replace('-', ' ') text = text.replace('/', ' ') text = text.replace('.', ' ') text = word_tokenize(text) text = [word for word in text if word not in stop_words] text = [word for word in text if word.isalpha()] return text <|reserved_special_token_0|> for i, thread in enumerate(comments): temp_thread = [] temp_thread.extend(titles[i]) for comment in thread: temp_thread.extend(preprocess(comment)) temp.append(temp_thread) <|reserved_special_token_0|> for index, title in enumerate(titles): text = '' bag_of_words = set(title) text = ' '.join(comments_in_thread[index]) dictionary = {word: text.count(word) for word in bag_of_words if text. count(word) > 0} list_of_dict.append(dictionary) <|reserved_special_token_0|> def count_keywords(comments, keywords): return {word: comments.count(word) for word in keywords if comments. 
count(word) > 0} <|reserved_special_token_0|> for index, thread in enumerate(keyword_dict): df = pd.DataFrame() df['word'] = thread.keys() df['count'] = thread.values() df = df.sort_values('count', ascending=False) df['frequency'] = df['count'] / (1 + len(comments_in_thread[index])) df['count'] = df['count'] / len(comments[index]) ** 0.5 keyword_dict[index] = df.reset_index(drop=True) <|reserved_special_token_0|> with open('variables.txt', 'wb') as fp: pickle.dump(variables, fp) <|reserved_special_token_1|> <|reserved_special_token_0|> client = MongoClient() db = client.redditCrawler collection = db.data_test1 def remove_posts(data, index_list): data = data.drop(index_list) return data.reset_index(drop=True) data = pd.DataFrame(list(collection.find())) mod_posts = [i for i in range(len(data)) if 'moronic Monday' in data[ 'title'][i]] data = remove_posts(data, mod_posts) titles = data['title'] content = data['post'] comments = data['comments'] comments_in_thread = [] for index, thread in enumerate(comments): aggregate = [] for comment in thread: if type(comment['comment_reply']) == str: aggregate.append(comment['comment_reply'].lower()) comments_in_thread.append(aggregate) comments = comments_in_thread assert len(titles) == len(content) assert len(comments) == len(content) stop_words = stopwords.words('english') stop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks', 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even', 'could', 'also']) def preprocess(text): text = text.lower() text = text.replace('$', ' ') text = text.replace('-', ' ') text = text.replace('/', ' ') text = text.replace('.', ' ') text = word_tokenize(text) text = [word for word in text if word not in stop_words] text = [word for word in text if word.isalpha()] return text titles = [preprocess(title) for title in titles] posts = [preprocess(text) for text in content] temp = [] for i, thread in enumerate(comments): temp_thread = [] temp_thread.extend(titles[i]) for comment in thread: temp_thread.extend(preprocess(comment)) temp.append(temp_thread) comments = temp list_of_dict = [] for index, title in enumerate(titles): text = '' bag_of_words = set(title) text = ' '.join(comments_in_thread[index]) dictionary = {word: text.count(word) for word in bag_of_words if text. count(word) > 0} list_of_dict.append(dictionary) title_keywords = [(list(Dict.keys()) if len(Dict) > 0 else [0]) for Dict in list_of_dict] title_keywords = [word for sublist in title_keywords for word in sublist if word != 0] title_keywords = set(title_keywords) def count_keywords(comments, keywords): return {word: comments.count(word) for word in keywords if comments. 
count(word) > 0} keyword_dict = [count_keywords(comment, title_keywords) for comment in comments ] for index, thread in enumerate(keyword_dict): df = pd.DataFrame() df['word'] = thread.keys() df['count'] = thread.values() df = df.sort_values('count', ascending=False) df['frequency'] = df['count'] / (1 + len(comments_in_thread[index])) df['count'] = df['count'] / len(comments[index]) ** 0.5 keyword_dict[index] = df.reset_index(drop=True) variables = [data['title'], titles, posts, comments, comments_in_thread, list_of_dict, title_keywords, keyword_dict] with open('variables.txt', 'wb') as fp: pickle.dump(variables, fp) <|reserved_special_token_1|> import pymongo import pandas as pd import re from pymongo import MongoClient from nltk.corpus import stopwords from nltk import word_tokenize from gensim import corpora import pickle client = MongoClient() db = client.redditCrawler collection = db.data_test1 def remove_posts(data, index_list): data = data.drop(index_list) return data.reset_index(drop=True) data = pd.DataFrame(list(collection.find())) mod_posts = [i for i in range(len(data)) if 'moronic Monday' in data[ 'title'][i]] data = remove_posts(data, mod_posts) titles = data['title'] content = data['post'] comments = data['comments'] comments_in_thread = [] for index, thread in enumerate(comments): aggregate = [] for comment in thread: if type(comment['comment_reply']) == str: aggregate.append(comment['comment_reply'].lower()) comments_in_thread.append(aggregate) comments = comments_in_thread assert len(titles) == len(content) assert len(comments) == len(content) stop_words = stopwords.words('english') stop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks', 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even', 'could', 'also']) def preprocess(text): text = text.lower() text = text.replace('$', ' ') text = text.replace('-', ' ') text = text.replace('/', ' ') text = text.replace('.', ' ') text = word_tokenize(text) text = [word for word in text if word not in stop_words] text = [word for word in text if word.isalpha()] return text titles = [preprocess(title) for title in titles] posts = [preprocess(text) for text in content] temp = [] for i, thread in enumerate(comments): temp_thread = [] temp_thread.extend(titles[i]) for comment in thread: temp_thread.extend(preprocess(comment)) temp.append(temp_thread) comments = temp list_of_dict = [] for index, title in enumerate(titles): text = '' bag_of_words = set(title) text = ' '.join(comments_in_thread[index]) dictionary = {word: text.count(word) for word in bag_of_words if text. count(word) > 0} list_of_dict.append(dictionary) title_keywords = [(list(Dict.keys()) if len(Dict) > 0 else [0]) for Dict in list_of_dict] title_keywords = [word for sublist in title_keywords for word in sublist if word != 0] title_keywords = set(title_keywords) def count_keywords(comments, keywords): return {word: comments.count(word) for word in keywords if comments. 
count(word) > 0} keyword_dict = [count_keywords(comment, title_keywords) for comment in comments ] for index, thread in enumerate(keyword_dict): df = pd.DataFrame() df['word'] = thread.keys() df['count'] = thread.values() df = df.sort_values('count', ascending=False) df['frequency'] = df['count'] / (1 + len(comments_in_thread[index])) df['count'] = df['count'] / len(comments[index]) ** 0.5 keyword_dict[index] = df.reset_index(drop=True) variables = [data['title'], titles, posts, comments, comments_in_thread, list_of_dict, title_keywords, keyword_dict] with open('variables.txt', 'wb') as fp: pickle.dump(variables, fp) <|reserved_special_token_1|> import pymongo import pandas as pd import re from pymongo import MongoClient from nltk.corpus import stopwords from nltk import word_tokenize from gensim import corpora import pickle client = MongoClient() db = client.redditCrawler collection = db.data_test1 def remove_posts(data, index_list): data = data.drop(index_list) return data.reset_index(drop=True) data = pd.DataFrame(list(collection.find())) mod_posts = [i for i in range(len(data)) if 'moronic Monday' in data['title'][i]] #remove all the mod posts that include 'moronic Monday' data = remove_posts(data, mod_posts) titles = data['title'] content = data['post'] comments = data['comments'] # collect only the comments without vote scores, dates, etc comments_in_thread = [] for index, thread in enumerate(comments): aggregate = [] for comment in thread: if type(comment['comment_reply']) == str: aggregate.append(comment['comment_reply'].lower()) comments_in_thread.append(aggregate) comments = comments_in_thread #number of titles and post need to be the same assert len(titles) == len(content) assert len(comments) == len(content) #preprocess stop_words = stopwords.words('english') stop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks', 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even', 'could', 'also', ]) #Function to clean off each dataset item; stop words (what, if, is, where, how, I, she) def preprocess(text): #no content/nan/len of 0 #text = [re.sub('[^a-zA-Z0-9]+', ' ', word) for word in text] text = text.lower() text = text.replace('$', ' ') text = text.replace('-', ' ') text = text.replace("/", ' ') text = text.replace(".", ' ') text = word_tokenize(text) ## text = [re.sub('[^a-zA-Z0-9]+', '', word) for word in text] text = [word for word in text if word not in stop_words] text = [word for word in text if word.isalpha()] return text #pass titles and comments through pre-processor titles = [preprocess(title) for title in titles] posts = [preprocess(text) for text in content] # process comments ##comments = [[preprocess(comment) for comment in thread] for thread in comments] temp = [] for i, thread in enumerate(comments): temp_thread = [] temp_thread.extend(titles[i]) for comment in thread: temp_thread.extend(preprocess(comment)) temp.append(temp_thread) comments = temp # form a list of dictionaries for each title, compile # each word and its corresponding frequencies in the post's comment section list_of_dict = [] for index, title in enumerate(titles): text = '' bag_of_words = set(title) text = ' '.join(comments_in_thread[index]) ## text = comments[index] dictionary = {word:text.count(word) for word in bag_of_words if text.count(word) > 0} list_of_dict.append(dictionary) title_keywords = [list(Dict.keys()) if len(Dict) > 0 else [0] for Dict in list_of_dict] title_keywords = [word for sublist in title_keywords for word in sublist if word != 0 ] title_keywords 
= set(title_keywords) ##title_keywords = set(title_keywords) ##count the number of keywords in the comment section def count_keywords(comments, keywords): ## sample = ' '.join(comments).split() return {word: comments.count(word) for word in keywords if comments.count(word) > 0} keyword_dict = [count_keywords(comment, title_keywords) for comment in comments] for index, thread in enumerate(keyword_dict): #normalize each keyword by the number of words present df = pd.DataFrame() df['word'] = thread.keys() df['count'] = thread.values() df = df.sort_values('count', ascending = False) #dividing by number of words in each thread ## df['frequency'] = df['count']/(len(comments[index])) df['frequency'] = df['count']/(1+len(comments_in_thread[index])) df['count'] = df['count']/(len(comments[index]))**0.5 keyword_dict[index] = df.reset_index(drop=True) #save varialbes variables = [data['title'], titles, posts, comments, comments_in_thread, list_of_dict, title_keywords, keyword_dict] with open('variables.txt', 'wb') as fp: pickle.dump(variables, fp)
flexible
{ "blob_id": "341fb4442ba1d1bb13dbbe123e1051e1ceeb91e7", "index": 4431, "step-1": "<mask token>\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\n<mask token>\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\n<mask token>\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\n<mask token>\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\n<mask token>\nassert len(titles) == len(content)\nassert len(comments) == len(content)\n<mask token>\nstop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks',\n 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even',\n 'could', 'also'])\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\n<mask token>\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\n<mask token>\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n dictionary = {word: text.count(word) for word in bag_of_words if text.\n count(word) > 0}\n list_of_dict.append(dictionary)\n<mask token>\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\n<mask token>\nfor index, thread in enumerate(keyword_dict):\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending=False)\n df['frequency'] = df['count'] / (1 + len(comments_in_thread[index]))\n df['count'] = df['count'] / len(comments[index]) ** 0.5\n keyword_dict[index] = df.reset_index(drop=True)\n<mask token>\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n", "step-3": "<mask token>\nclient = MongoClient()\ndb = client.redditCrawler\ncollection = db.data_test1\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\ndata = pd.DataFrame(list(collection.find()))\nmod_posts = [i for i in range(len(data)) if 'moronic Monday' in data[\n 'title'][i]]\ndata = remove_posts(data, mod_posts)\ntitles = data['title']\ncontent = data['post']\ncomments = data['comments']\ncomments_in_thread = []\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\ncomments = comments_in_thread\nassert len(titles) == 
len(content)\nassert len(comments) == len(content)\nstop_words = stopwords.words('english')\nstop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks',\n 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even',\n 'could', 'also'])\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\ntitles = [preprocess(title) for title in titles]\nposts = [preprocess(text) for text in content]\ntemp = []\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\ncomments = temp\nlist_of_dict = []\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n dictionary = {word: text.count(word) for word in bag_of_words if text.\n count(word) > 0}\n list_of_dict.append(dictionary)\ntitle_keywords = [(list(Dict.keys()) if len(Dict) > 0 else [0]) for Dict in\n list_of_dict]\ntitle_keywords = [word for sublist in title_keywords for word in sublist if\n word != 0]\ntitle_keywords = set(title_keywords)\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\nkeyword_dict = [count_keywords(comment, title_keywords) for comment in comments\n ]\nfor index, thread in enumerate(keyword_dict):\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending=False)\n df['frequency'] = df['count'] / (1 + len(comments_in_thread[index]))\n df['count'] = df['count'] / len(comments[index]) ** 0.5\n keyword_dict[index] = df.reset_index(drop=True)\nvariables = [data['title'], titles, posts, comments, comments_in_thread,\n list_of_dict, title_keywords, keyword_dict]\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n", "step-4": "import pymongo\nimport pandas as pd\nimport re\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom gensim import corpora\nimport pickle\nclient = MongoClient()\ndb = client.redditCrawler\ncollection = db.data_test1\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\ndata = pd.DataFrame(list(collection.find()))\nmod_posts = [i for i in range(len(data)) if 'moronic Monday' in data[\n 'title'][i]]\ndata = remove_posts(data, mod_posts)\ntitles = data['title']\ncontent = data['post']\ncomments = data['comments']\ncomments_in_thread = []\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\ncomments = comments_in_thread\nassert len(titles) == len(content)\nassert len(comments) == len(content)\nstop_words = stopwords.words('english')\nstop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks',\n 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even',\n 'could', 'also'])\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = 
word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\ntitles = [preprocess(title) for title in titles]\nposts = [preprocess(text) for text in content]\ntemp = []\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\ncomments = temp\nlist_of_dict = []\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n dictionary = {word: text.count(word) for word in bag_of_words if text.\n count(word) > 0}\n list_of_dict.append(dictionary)\ntitle_keywords = [(list(Dict.keys()) if len(Dict) > 0 else [0]) for Dict in\n list_of_dict]\ntitle_keywords = [word for sublist in title_keywords for word in sublist if\n word != 0]\ntitle_keywords = set(title_keywords)\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\nkeyword_dict = [count_keywords(comment, title_keywords) for comment in comments\n ]\nfor index, thread in enumerate(keyword_dict):\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending=False)\n df['frequency'] = df['count'] / (1 + len(comments_in_thread[index]))\n df['count'] = df['count'] / len(comments[index]) ** 0.5\n keyword_dict[index] = df.reset_index(drop=True)\nvariables = [data['title'], titles, posts, comments, comments_in_thread,\n list_of_dict, title_keywords, keyword_dict]\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n", "step-5": "import pymongo\nimport pandas as pd\nimport re\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom gensim import corpora\n\nimport pickle\n\nclient = MongoClient()\ndb = client.redditCrawler\ncollection = db.data_test1\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\ndata = pd.DataFrame(list(collection.find()))\nmod_posts = [i for i in range(len(data)) if 'moronic Monday' in data['title'][i]]\n\n#remove all the mod posts that include 'moronic Monday'\ndata = remove_posts(data, mod_posts)\ntitles = data['title']\ncontent = data['post']\ncomments = data['comments']\n\n# collect only the comments without vote scores, dates, etc\ncomments_in_thread = []\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\n\ncomments = comments_in_thread\n#number of titles and post need to be the same\nassert len(titles) == len(content) \nassert len(comments) == len(content)\n\n\n#preprocess\nstop_words = stopwords.words('english')\nstop_words.extend(['would',\n 'people',\n 'money',\n 'think',\n 'thinks',\n 'thanks',\n 'thing',\n 'things',\n 'ok',\n 'nt',\n 'actually',\n 'like',\n 'get',\n 'even',\n 'could',\n 'also',\n ])\n\n#Function to clean off each dataset item; stop words (what, if, is, where, how, I, she)\n\ndef preprocess(text):\n #no content/nan/len of 0\n #text = [re.sub('[^a-zA-Z0-9]+', ' ', word) for word in text]\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace(\"/\", ' ')\n text = text.replace(\".\", ' ')\n text = word_tokenize(text)\n## text = 
[re.sub('[^a-zA-Z0-9]+', '', word) for word in text]\n text = [word for word in text if word not in stop_words] \n text = [word for word in text if word.isalpha()]\n return text\n\n#pass titles and comments through pre-processor\ntitles = [preprocess(title) for title in titles]\nposts = [preprocess(text) for text in content]\n\n# process comments\n##comments = [[preprocess(comment) for comment in thread] for thread in comments]\ntemp = []\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\n\ncomments = temp\n\n# form a list of dictionaries for each title, compile\n# each word and its corresponding frequencies in the post's comment section\nlist_of_dict = []\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n## text = comments[index]\n dictionary = {word:text.count(word) for word in bag_of_words if text.count(word) > 0}\n list_of_dict.append(dictionary)\n\ntitle_keywords = [list(Dict.keys()) if len(Dict) > 0 else [0] for Dict in list_of_dict]\ntitle_keywords = [word for sublist in title_keywords for word in sublist if word != 0 ]\ntitle_keywords = set(title_keywords)\n##title_keywords = set(title_keywords)\n\n##count the number of keywords in the comment section\ndef count_keywords(comments, keywords):\n## sample = ' '.join(comments).split()\n return {word: comments.count(word) for word in keywords if comments.count(word) > 0}\n\nkeyword_dict = [count_keywords(comment, title_keywords) for comment in comments]\nfor index, thread in enumerate(keyword_dict):\n #normalize each keyword by the number of words present\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending = False)\n #dividing by number of words in each thread\n## df['frequency'] = df['count']/(len(comments[index]))\n df['frequency'] = df['count']/(1+len(comments_in_thread[index]))\n df['count'] = df['count']/(len(comments[index]))**0.5\n keyword_dict[index] = df.reset_index(drop=True)\n\n#save varialbes\nvariables = [data['title'], titles, posts, comments, comments_in_thread,\n list_of_dict, title_keywords, keyword_dict]\n\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n\n\n\n\n\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
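The record above tokenizes Reddit thread titles and comments with NLTK, strips stop words and non-alphabetic tokens, then counts title keywords per thread. A standalone sketch of just the preprocessing step applied to one sample string; it assumes the nltk punkt and stopwords data are already downloaded, and the sample sentence is illustrative.

from nltk import word_tokenize
from nltk.corpus import stopwords

# Same idea as the record: standard English stop words plus a few domain-specific ones.
stop_words = set(stopwords.words("english")) | {"would", "like", "get"}

def preprocess(text):
    text = text.lower()
    for ch in "$-/.":
        text = text.replace(ch, " ")
    return [w for w in word_tokenize(text)
            if w.isalpha() and w not in stop_words]

print(preprocess("I would like to get a $500/month index-fund plan."))
# expected: ['month', 'index', 'fund', 'plan']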
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for line in f: l = line.strip() l = l.split(',') l = map(float, l) data.append(l) f.close() for i in range(100): shuffle(data) for l in data: train_data.append(l[0:-1]) train_class.append(int(l[-1])) <|reserved_special_token_0|> for i in range(len(train_data)): for entry in train_data[i]: f.write(str(entry) + ',') f.write(str(train_class[i]) + '\n') f.close() <|reserved_special_token_1|> <|reserved_special_token_0|> path = 'data/' filename = 'data' f = open(path + filename + '.csv', 'r') data = list() train_data = list() train_class = list() for line in f: l = line.strip() l = l.split(',') l = map(float, l) data.append(l) f.close() for i in range(100): shuffle(data) for l in data: train_data.append(l[0:-1]) train_class.append(int(l[-1])) f = open(path + filename + '_r.csv', 'w') for i in range(len(train_data)): for entry in train_data[i]: f.write(str(entry) + ',') f.write(str(train_class[i]) + '\n') f.close() <|reserved_special_token_1|> from random import shuffle path = 'data/' filename = 'data' f = open(path + filename + '.csv', 'r') data = list() train_data = list() train_class = list() for line in f: l = line.strip() l = l.split(',') l = map(float, l) data.append(l) f.close() for i in range(100): shuffle(data) for l in data: train_data.append(l[0:-1]) train_class.append(int(l[-1])) f = open(path + filename + '_r.csv', 'w') for i in range(len(train_data)): for entry in train_data[i]: f.write(str(entry) + ',') f.write(str(train_class[i]) + '\n') f.close() <|reserved_special_token_1|> ########################################################################################## ## Scene Classification ## ## Authors : Chris Andrew, Santhoshini Reddy, Nikath Yasmeen, Sai Hima, Sriya Ragini ## ################################################################### ## ## Description: This project was developed as part of the DIP course at IIIT Sri City ## ## All code is available for free usage for educational purposes ## ## Authors do not authorize commercial use of the source code ## ########################################################################################## # The following module shuffles the data to enable 10 fold cross-validation analysis ################ Imports ################ from random import shuffle ################ Global ################ path = "data/" filename = "data" ################ Source ################ # ------------------------------------ f = open(path+filename+".csv",'r') data = list() train_data = list() train_class = list() # ------------------------------------ for line in f: l = line.strip() l = l.split(',') l = map(float , l) data.append(l) # ------------------------------------ f.close() # ------------------------------------ for i in range(100): shuffle(data) # ------------------------------------ for l in data: train_data.append(l[0:-1]) train_class.append(int(l[-1])) # ------------------------------------ f = open(path+filename+"_r.csv",'w') for i in range(len(train_data)): for entry in train_data[i]: f.write(str(entry)+',') # ------------------------------------ f.write(str(train_class[i])+'\n') # ------------------------------------ f.close() # ------------------------------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------------------------------ # 
------------------------------------------------------------------------------------------------------------------------------------------
flexible
{ "blob_id": "b8b20d6c977a6c1df6a592188c6e799f12da6a23", "index": 9734, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float, l)\n data.append(l)\nf.close()\nfor i in range(100):\n shuffle(data)\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\n<mask token>\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry) + ',')\n f.write(str(train_class[i]) + '\\n')\nf.close()\n", "step-3": "<mask token>\npath = 'data/'\nfilename = 'data'\nf = open(path + filename + '.csv', 'r')\ndata = list()\ntrain_data = list()\ntrain_class = list()\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float, l)\n data.append(l)\nf.close()\nfor i in range(100):\n shuffle(data)\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\nf = open(path + filename + '_r.csv', 'w')\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry) + ',')\n f.write(str(train_class[i]) + '\\n')\nf.close()\n", "step-4": "from random import shuffle\npath = 'data/'\nfilename = 'data'\nf = open(path + filename + '.csv', 'r')\ndata = list()\ntrain_data = list()\ntrain_class = list()\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float, l)\n data.append(l)\nf.close()\nfor i in range(100):\n shuffle(data)\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\nf = open(path + filename + '_r.csv', 'w')\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry) + ',')\n f.write(str(train_class[i]) + '\\n')\nf.close()\n", "step-5": "##########################################################################################\n## Scene Classification ##\n## Authors : Chris Andrew, Santhoshini Reddy, Nikath Yasmeen, Sai Hima, Sriya Ragini ##\n################################################################### ##\n## Description: This project was developed as part of the DIP course at IIIT Sri City ##\n## All code is available for free usage for educational purposes ##\n## Authors do not authorize commercial use of the source code ##\n##########################################################################################\n\n# The following module shuffles the data to enable 10 fold cross-validation analysis\n\n################ Imports ################\nfrom random import shuffle\n################ Global ################\npath = \"data/\"\nfilename = \"data\"\n################ Source ################\n# ------------------------------------\nf = open(path+filename+\".csv\",'r')\ndata = list()\ntrain_data = list()\ntrain_class = list()\n# ------------------------------------\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float , l)\n data.append(l)\n # ------------------------------------\nf.close()\n# ------------------------------------\nfor i in range(100):\n shuffle(data)\n# ------------------------------------\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\n# ------------------------------------\nf = open(path+filename+\"_r.csv\",'w')\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry)+',')\n # ------------------------------------\n f.write(str(train_class[i])+'\\n')\n # ------------------------------------\nf.close()\n# ------------------------------------------------------------------------------------------------------------------------------------------\n# 
------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------------------------------------\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
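The record above shuffles a comma-separated dataset and rewrites it with the class label cast to int, to prepare for 10-fold cross-validation. A sketch of the same shuffle-and-rewrite step using the csv module; it assumes the same illustrative data/data.csv layout of float features followed by an integer class label in the last column.

import csv
import random

with open("data/data.csv") as f:
    rows = [[float(x) for x in line] for line in csv.reader(f) if line]

# A single shuffle already yields a uniformly random permutation of the rows.
random.shuffle(rows)

with open("data/data_r.csv", "w", newline="") as f:
    writer = csv.writer(f)
    for row in rows:
        writer.writerow(row[:-1] + [int(row[-1])])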
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `feat` package.""" from feat.detector import Detector from feat.data import Fex from feat.utils import get_resource_path from .utils import get_test_data_path import pandas as pd import feat import os import wget # def test_models(): # print("Downloading FEX emotion model.") # fex_emotion_model = "https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5" # wget.download(fex_emotion_model, get_resource_path()) # if os.path.exists(os.path.join(get_resource_path(), "fer_aug_model.h5")): # print("\nFEX emotion model downloaded successfully.\n") # else: # print("Something went wrong. Model not found in directory.") # print("Downloading landmark detection model.") # lbfmodel = "https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml" # wget.download(lbfmodel, get_resource_path()) # if os.path.exists(os.path.join(get_resource_path(), "lbfmodel.yaml")): # print("\nLandmark detection model downloaded successfully.\n") # else: # print("Something went wrong. Model not found in directory.") # emotion_model = "fer_aug_model.h5" # emotion_model_path = os.path.join(get_resource_path(), emotion_model) # print("PATH TO EMOTION MODEL",emotion_model_path) # assert os.path.exists(emotion_model_path)==True # landmark_model = "lbfmodel.yaml" # landmark_model_path = os.path.join(get_resource_path(), landmark_model) # assert os.path.exists(landmark_model_path)==True def test_detector(): detector = Detector(n_jobs=1) assert detector['n_jobs']==1 assert type(detector)==Detector # Test detect image inputFname = os.path.join(get_test_data_path(), "input.jpg") out = detector.detect_image(inputFname = inputFname) assert type(out) == Fex assert len(out) == 1 assert out.happiness.values[0] > 0 outputFname = os.path.join(get_test_data_path(), "output.csv") out = detector.detect_image(inputFname=inputFname, outputFname=outputFname) assert out assert os.path.exists(outputFname) out = pd.read_csv(outputFname) assert out.happiness.values[0] > 0 # Test detect video inputFname = os.path.join(get_test_data_path(), "input.mp4") out = detector.detect_video(inputFname=inputFname) assert len(out)==72 outputFname = os.path.join(get_test_data_path(), "output.csv") out = detector.detect_video(inputFname=inputFname, outputFname=outputFname) assert out assert os.path.exists(outputFname) out = pd.read_csv(outputFname) assert out.happiness.values.max() > 0
normal
{ "blob_id": "753bdbf080e7a8652c39e40beeae51f74382d606", "index": 1300, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs'] == 1\n assert type(detector) == Detector\n inputFname = os.path.join(get_test_data_path(), 'input.jpg')\n out = detector.detect_image(inputFname=inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0\n inputFname = os.path.join(get_test_data_path(), 'input.mp4')\n out = detector.detect_video(inputFname=inputFname)\n assert len(out) == 72\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0\n", "step-3": "<mask token>\nfrom feat.detector import Detector\nfrom feat.data import Fex\nfrom feat.utils import get_resource_path\nfrom .utils import get_test_data_path\nimport pandas as pd\nimport feat\nimport os\nimport wget\n\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs'] == 1\n assert type(detector) == Detector\n inputFname = os.path.join(get_test_data_path(), 'input.jpg')\n out = detector.detect_image(inputFname=inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0\n inputFname = os.path.join(get_test_data_path(), 'input.mp4')\n out = detector.detect_video(inputFname=inputFname)\n assert len(out) == 72\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0\n", "step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `feat` package.\"\"\"\n\nfrom feat.detector import Detector\nfrom feat.data import Fex\nfrom feat.utils import get_resource_path\nfrom .utils import get_test_data_path\nimport pandas as pd\nimport feat\nimport os\nimport wget\n\n# def test_models():\n# print(\"Downloading FEX emotion model.\")\n# fex_emotion_model = \"https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5\"\n# wget.download(fex_emotion_model, get_resource_path())\n\n# if os.path.exists(os.path.join(get_resource_path(), \"fer_aug_model.h5\")):\n# print(\"\\nFEX emotion model downloaded successfully.\\n\")\n# else:\n# print(\"Something went wrong. Model not found in directory.\")\n\n# print(\"Downloading landmark detection model.\")\n# lbfmodel = \"https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml\"\n# wget.download(lbfmodel, get_resource_path())\n\n# if os.path.exists(os.path.join(get_resource_path(), \"lbfmodel.yaml\")):\n# print(\"\\nLandmark detection model downloaded successfully.\\n\")\n# else:\n# print(\"Something went wrong. 
Model not found in directory.\")\n\n# emotion_model = \"fer_aug_model.h5\"\n# emotion_model_path = os.path.join(get_resource_path(), emotion_model)\n# print(\"PATH TO EMOTION MODEL\",emotion_model_path)\n# assert os.path.exists(emotion_model_path)==True\n\n# landmark_model = \"lbfmodel.yaml\"\n# landmark_model_path = os.path.join(get_resource_path(), landmark_model)\n# assert os.path.exists(landmark_model_path)==True\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs']==1\n assert type(detector)==Detector\n\n # Test detect image\n inputFname = os.path.join(get_test_data_path(), \"input.jpg\")\n out = detector.detect_image(inputFname = inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0 \n\n outputFname = os.path.join(get_test_data_path(), \"output.csv\")\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0 \n\n # Test detect video\n inputFname = os.path.join(get_test_data_path(), \"input.mp4\")\n out = detector.detect_video(inputFname=inputFname)\n assert len(out)==72\n\n outputFname = os.path.join(get_test_data_path(), \"output.csv\")\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
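The record above is a pytest module exercising the feat Detector on an image and a video and checking the CSV output. A sketch of invoking such a test file programmatically rather than from the shell; the tests/test_detector.py path is an assumption about where the module lives.

import pytest

# Equivalent to running `pytest -q tests/test_detector.py -k detector` on the CLI.
raise SystemExit(pytest.main(["-q", "tests/test_detector.py", "-k", "detector"]))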